provides drivers with a sector number relative to the whole device, rather than
having to take partition number into account in order to arrive at the true
sector number. The routine blk_partition_remap() is invoked by
-generic_make_request even before invoking the queue specific make_request_fn,
+generic_make_request even before invoking the queue specific ->submit_bio,
so the i/o scheduler also gets to operate on whole disk sector numbers. This
should typically not require changes to block drivers; a driver simply never
gets to invoke its own partition sector offset calculations since all bios
sent are offset from the beginning of the device.
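In practice this means a bio based driver can treat bio->bi_iter.bi_sector as an absolute, whole-device sector inside its ->submit_bio method. As a minimal illustration (a hypothetical handler, not part of this patch, that only logs the request and completes it):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static blk_qc_t example_submit_bio(struct bio *bio)
{
	/* bi_sector has already been remapped to a whole-device sector. */
	pr_debug("%s: %u bytes at sector %llu\n",
		 bio->bi_disk->disk_name, bio->bi_iter.bi_size,
		 (unsigned long long)bio->bi_iter.bi_sector);
	bio_endio(bio);
	return BLK_QC_T_NONE;
}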
-Implementation details for make_request_fn based block drivers
+Implementation details for bio based block drivers
--------------------------------------------------------------
These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit
struct gendisk *disk;
};
-static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t nfhd_submit_bio(struct bio *bio)
{
struct nfhd_device *dev = bio->bi_disk->private_data;
struct bio_vec bvec;
static const struct block_device_operations nfhd_ops = {
.owner = THIS_MODULE,
+ .submit_bio = nfhd_submit_bio,
.getgeo = nfhd_getgeo,
};
dev->bsize = bsize;
dev->bshift = ffs(bsize) - 10;
- dev->queue = blk_alloc_queue(nfhd_make_request, NUMA_NO_NODE);
+ dev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (dev->queue == NULL)
goto free_dev;
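The nfhd hunks above show the complete conversion pattern for a bio based driver: the handler moves from the request_queue to block_device_operations, and the queue is allocated without a make_request_fn. For reference, here is a self-contained sketch of a trivial RAM-backed driver written against the new API; the sketchblk names are hypothetical, and highmem handling, blk_queue_split(), bounds checking and special requests (flush, discard) are deliberately left out:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define SKETCHBLK_SECTORS	2048	/* 1 MiB of backing memory */

static struct gendisk *sketch_disk;
static void *sketch_data;
static int sketch_major;

static blk_qc_t sketch_submit_bio(struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector;	/* whole-device sector */
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
		void *dsk = sketch_data + (sector << SECTOR_SHIFT);

		if (bio_data_dir(bio) == WRITE)
			memcpy(dsk, buf, bvec.bv_len);
		else
			memcpy(buf, dsk, bvec.bv_len);
		sector += bvec.bv_len >> SECTOR_SHIFT;
	}
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static const struct block_device_operations sketch_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= sketch_submit_bio,	/* replaces make_request_fn */
};

static int __init sketch_init(void)
{
	int ret;

	sketch_data = vzalloc(SKETCHBLK_SECTORS << SECTOR_SHIFT);
	if (!sketch_data)
		return -ENOMEM;

	ret = register_blkdev(0, "sketchblk");
	if (ret < 0)
		goto out_free;
	sketch_major = ret;

	ret = -ENOMEM;
	sketch_disk = alloc_disk(1);
	if (!sketch_disk)
		goto out_unregister;

	/* The queue no longer takes a make_request_fn argument. */
	sketch_disk->queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!sketch_disk->queue)
		goto out_put_disk;

	sketch_disk->major = sketch_major;
	sketch_disk->first_minor = 0;
	sketch_disk->fops = &sketch_ops;
	snprintf(sketch_disk->disk_name, DISK_NAME_LEN, "sketchblk0");
	set_capacity(sketch_disk, SKETCHBLK_SECTORS);
	add_disk(sketch_disk);
	return 0;

out_put_disk:
	put_disk(sketch_disk);
out_unregister:
	unregister_blkdev(sketch_major, "sketchblk");
out_free:
	vfree(sketch_data);
	return ret;
}

static void __exit sketch_exit(void)
{
	del_gendisk(sketch_disk);
	blk_cleanup_queue(sketch_disk->queue);
	put_disk(sketch_disk);
	unregister_blkdev(sketch_major, "sketchblk");
	vfree(sketch_data);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");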
spin_unlock(&dev->lock);
}
-static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t simdisk_submit_bio(struct bio *bio)
{
struct simdisk *dev = bio->bi_disk->private_data;
struct bio_vec bvec;
static const struct block_device_operations simdisk_ops = {
.owner = THIS_MODULE,
+ .submit_bio = simdisk_submit_bio,
.open = simdisk_open,
.release = simdisk_release,
};
spin_lock_init(&dev->lock);
dev->users = 0;
- dev->queue = blk_alloc_queue(simdisk_make_request, NUMA_NO_NODE);
+ dev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (dev->queue == NULL) {
pr_err("blk_alloc_queue failed\n");
goto out_alloc_queue;
* blkcg_init_queue - initialize blkcg part of request queue
* @q: request_queue to initialize
*
- * Called from __blk_alloc_queue(). Responsible for initializing blkcg
+ * Called from blk_alloc_queue(). Responsible for initializing blkcg
* part of new request_queue @q.
*
* RETURNS:
* A block device may call blk_sync_queue to ensure that any
* such activity is cancelled, thus allowing it to release resources
* that the callbacks might use. The caller must already have made sure
- * that its ->make_request_fn will not re-add plugging prior to calling
+ * that its ->submit_bio will not re-add plugging prior to calling
* this function.
*
* This function does not cancel any asynchronous activity arising
{
}
-struct request_queue *__blk_alloc_queue(int node_id)
+struct request_queue *blk_alloc_queue(int node_id)
{
struct request_queue *q;
int ret;
blk_queue_dma_alignment(q, 511);
blk_set_default_limits(&q->limits);
+ q->nr_requests = BLKDEV_MAX_RQ;
return q;
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
-
-struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id)
-{
- struct request_queue *q;
-
- if (WARN_ON_ONCE(!make_request))
- return NULL;
-
- q = __blk_alloc_queue(node_id);
- if (!q)
- return NULL;
- q->make_request_fn = make_request;
- q->nr_requests = BLKDEV_MAX_RQ;
- return q;
-}
EXPORT_SYMBOL(blk_alloc_queue);
/**
static blk_qc_t do_make_request(struct bio *bio)
{
- struct request_queue *q = bio->bi_disk->queue;
+ struct gendisk *disk = bio->bi_disk;
blk_qc_t ret = BLK_QC_T_NONE;
if (blk_crypto_bio_prep(&bio)) {
- if (!q->make_request_fn)
- return blk_mq_make_request(q, bio);
- ret = q->make_request_fn(q, bio);
+ if (!disk->fops->submit_bio)
+ return blk_mq_submit_bio(bio);
+ ret = disk->fops->submit_bio(bio);
}
- blk_queue_exit(q);
+ blk_queue_exit(disk->queue);
return ret;
}
{
/*
* bio_list_on_stack[0] contains bios submitted by the current
- * make_request_fn.
- * bio_list_on_stack[1] contains bios that were submitted before
- * the current make_request_fn, but that haven't been processed
- * yet.
+ * ->submit_bio.
+ * bio_list_on_stack[1] contains bios that were submitted before the
+	 * current ->submit_bio, but that haven't been processed yet.
*/
struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;
goto out;
/*
- * We only want one ->make_request_fn to be active at a time, else
+ * We only want one ->submit_bio to be active at a time, else
* stack usage with stacked devices could be a problem. So use
-	 * current->bio_list to keep a list of requests submited by a
-	 * make_request_fn function. current->bio_list is also used as a
+	 * current->bio_list to keep a list of requests submitted by a
+	 * ->submit_bio method. current->bio_list is also used as a
* flag to say if generic_make_request is currently active in this
* task or not. If it is NULL, then no make_request is active. If
* it is non-NULL, then a make_request is active, and new requests
* We pretend that we have just taken it off a longer list, so
* we assign bio_list to a pointer to the bio_list_on_stack,
* thus initialising the bio_list of new bios to be
- * added. ->make_request() may indeed add some more bios
+ * added. ->submit_bio() may indeed add some more bios
* through a recursive call to generic_make_request. If it
* did, we find a non-NULL value in bio_list and re-enter the loop
* from the top. In this case we really did just take the bio
* of the top of the list (no pretending) and so remove it from
- * bio_list, and call into ->make_request() again.
+ * bio_list, and call into ->submit_bio() again.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack[0]);
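The comments above describe how recursion from stacked drivers is flattened into iteration: only the outermost caller runs the dispatch loop, while nested submissions are parked on current->bio_list. A structure-only sketch of that scheme follows; error handling, the q_usage_counter reference, the blk-mq fallback and the sorting into bio_list_on_stack[1] are all omitted, and the sketch_* names are stand-ins rather than the real generic_make_request:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched.h>

/* Stand-in for the per-disk dispatch done by do_make_request() above. */
static blk_qc_t sketch_dispatch(struct bio *bio)
{
	return bio->bi_disk->fops->submit_bio(bio);
}

blk_qc_t sketch_generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (current->bio_list) {
		/* Nested submission from inside some ->submit_bio: defer it. */
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	bio_list_init(&bio_list_on_stack[0]);
	bio_list_init(&bio_list_on_stack[1]);
	current->bio_list = bio_list_on_stack;
	do {
		/* The driver may queue more bios via recursive calls. */
		ret = sketch_dispatch(bio);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
	current->bio_list = NULL;

	return ret;
}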
*/
blk_qc_t direct_make_request(struct bio *bio)
{
- struct request_queue *q = bio->bi_disk->queue;
+ struct gendisk *disk = bio->bi_disk;
- if (WARN_ON_ONCE(q->make_request_fn)) {
+ if (WARN_ON_ONCE(!disk->queue->mq_ops)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
if (unlikely(bio_queue_enter(bio)))
return BLK_QC_T_NONE;
if (!blk_crypto_bio_prep(&bio)) {
- blk_queue_exit(q);
+ blk_queue_exit(disk->queue);
return BLK_QC_T_NONE;
}
- return blk_mq_make_request(q, bio);
+ return blk_mq_submit_bio(bio);
}
EXPORT_SYMBOL_GPL(direct_make_request);
}
/**
- * blk_mq_make_request - Create and send a request to block device.
- * @q: Request queue pointer.
+ * blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
*
- * Builds up a request structure from @q and @bio and send to the device. The
+ * Builds up a request structure from @bio and sends it to the device. The
*
* Returns: Request queue cookie.
*/
-blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t blk_mq_submit_bio(struct bio *bio)
{
+ struct request_queue *q = bio->bi_disk->queue;
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_mq_alloc_data data = {
blk_queue_exit(q);
return BLK_QC_T_NONE;
}
-EXPORT_SYMBOL_GPL(blk_mq_make_request); /* only for request based dm */
+EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx)
{
struct request_queue *uninit_q, *q;
- uninit_q = __blk_alloc_queue(set->numa_node);
+ uninit_q = blk_alloc_queue(set->numa_node);
if (!uninit_q)
return ERR_PTR(-ENOMEM);
uninit_q->queuedata = queuedata;
#endif
}
-struct request_queue *__blk_alloc_queue(int node_id);
-
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);
return err;
}
-static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_disk->private_data;
struct bio_vec bvec;
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
+ .submit_bio = brd_submit_bio,
.rw_page = brd_rw_page,
};
spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
- brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE);
+ brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE);
if (!brd->brd_queue)
goto out_free_dev;
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
-extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
+extern blk_qc_t drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
- .owner = THIS_MODULE,
- .open = drbd_open,
- .release = drbd_release,
+ .owner = THIS_MODULE,
+ .submit_bio = drbd_submit_bio,
+ .open = drbd_open,
+ .release = drbd_release,
};
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
drbd_init_set_defaults(device);
- q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE);
+ q = blk_alloc_queue(NUMA_NO_NODE);
if (!q)
goto out_no_q;
device->rq_queue = q;
}
}
-blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_disk->private_data;
unsigned long start_jif;
return &nullb->queues[index];
}
-static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
-static const struct block_device_operations null_ops = {
+static const struct block_device_operations null_bio_ops = {
+ .owner = THIS_MODULE,
+ .submit_bio = null_submit_bio,
+ .report_zones = null_report_zones,
+};
+
+static const struct block_device_operations null_rq_ops = {
.owner = THIS_MODULE,
.report_zones = null_report_zones,
};
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
- disk->fops = &null_ops;
+ if (queue_is_mq(nullb->q))
+ disk->fops = &null_rq_ops;
+ else
+ disk->fops = &null_bio_ops;
disk->private_data = nullb;
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
goto out_cleanup_tags;
}
} else if (dev->queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node);
+ nullb->q = blk_alloc_queue(dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
* block device, assembling the pieces to full packets and queuing them to the
* packet I/O scheduler.
*
- * At the top layer there is a custom make_request_fn function that forwards
+ * At the top layer there is a custom ->submit_bio function that forwards
* read requests directly to the iosched queue and puts write requests in the
* unaligned write queue. A kernel thread performs the necessary read
* gathering to convert the unaligned writes to aligned writes and then feeds
}
}
-static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
blk_queue_split(&bio);
- pd = q->queuedata;
+ pd = bio->bi_disk->queue->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
split = bio;
}
- pkt_make_request_write(q, split);
+ pkt_make_request_write(bio->bi_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;
static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
+ .submit_bio = pkt_submit_bio,
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
disk->flags = GENHD_FL_REMOVABLE;
strcpy(disk->disk_name, pd->name);
disk->private_data = pd;
- disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE);
+ disk->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!disk->queue)
goto out_mem2;
static int ps3vram_major;
-
-static const struct block_device_operations ps3vram_fops = {
- .owner = THIS_MODULE,
-};
-
-
#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */
#define DMA_NOTIFIER_SIZE 0x40
return next;
}
-static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
return BLK_QC_T_NONE;
}
+static const struct block_device_operations ps3vram_fops = {
+ .owner = THIS_MODULE,
+ .submit_bio = ps3vram_submit_bio,
+};
+
static int ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
ps3vram_proc_init(dev);
- queue = blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE);
+ queue = blk_alloc_queue(NUMA_NO_NODE);
if (!queue) {
dev_err(&dev->core, "blk_alloc_queue failed\n");
error = -ENOMEM;
static struct kmem_cache *bio_meta_pool;
+static blk_qc_t rsxx_submit_bio(struct bio *bio);
+
/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
fmode_t mode,
static const struct block_device_operations rsxx_fops = {
.owner = THIS_MODULE,
+ .submit_bio = rsxx_submit_bio,
.getgeo = rsxx_getgeo,
.ioctl = rsxx_blkdev_ioctl,
};
}
}
-static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t rsxx_submit_bio(struct bio *bio)
{
struct rsxx_cardinfo *card = bio->bi_disk->private_data;
struct rsxx_bio_meta *bio_meta;
return -ENOMEM;
}
- card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE);
+ card->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!card->queue) {
dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
unregister_blkdev(card->major, DRIVER_NAME);
return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
}
-static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t mm_submit_bio(struct bio *bio)
{
struct cardinfo *card = bio->bi_disk->private_data;
static const struct block_device_operations mm_fops = {
.owner = THIS_MODULE,
+ .submit_bio = mm_submit_bio,
.getgeo = mm_getgeo,
.revalidate_disk = mm_revalidate,
};
card->biotail = &card->bio;
spin_lock_init(&card->lock);
- card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE);
+ card->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!card->queue)
goto failed_alloc;
}
/*
- * Block layer want one ->make_request_fn to be active at a time
- * so if we use chained IO with parent IO in same context,
- * it's a deadlock. To avoid, it, it uses worker thread context.
+ * Block layer want one ->submit_bio to be active at a time, so if we use
+ * chained IO with parent IO in same context, it's a deadlock. To avoid that,
+ * use a worker thread context.
*/
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
unsigned long entry, struct bio *bio)
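The rewritten comment above points at a real consequence of the rule that only one ->submit_bio may be active per task: a driver that, from inside its own ->submit_bio, submits a chained bio to another device and waits for it will deadlock, because the chained bio merely sits on current->bio_list. Punting the nested submission to a worker is the usual escape hatch, roughly as in the hypothetical sketch below (illustrative helper names, not zram's actual functions):

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct punt_work {
	struct work_struct work;
	struct bio *bio;
	int error;
	struct completion done;
};

static void punt_work_fn(struct work_struct *w)
{
	struct punt_work *pw = container_of(w, struct punt_work, work);

	/* This worker is not inside any ->submit_bio, so it may wait. */
	pw->error = submit_bio_wait(pw->bio);
	complete(&pw->done);
}

/* Submit a chained bio and wait for it without deadlocking the caller. */
static int punt_chained_bio_sync(struct bio *bio)
{
	struct punt_work pw = { .bio = bio };

	INIT_WORK_ONSTACK(&pw.work, punt_work_fn);
	init_completion(&pw.done);
	queue_work(system_unbound_wq, &pw.work);
	wait_for_completion(&pw.done);
	destroy_work_on_stack(&pw.work);
	return pw.error;
}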
/*
* Handler function for all zram I/O requests.
*/
-static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_disk->private_data;
static const struct block_device_operations zram_devops = {
.open = zram_open,
+ .submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
.rw_page = zram_rw_page,
.owner = THIS_MODULE
#ifdef CONFIG_ZRAM_WRITEBACK
spin_lock_init(&zram->wb_limit_lock);
#endif
- queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE);
+ queue = blk_alloc_queue(NUMA_NO_NODE);
if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
return tgt_dev;
}
-static const struct block_device_operations nvm_fops = {
- .owner = THIS_MODULE,
-};
-
static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
struct nvm_tgt_type *tt;
goto err_dev;
}
- tqueue = blk_alloc_queue(tt->make_rq, dev->q->node);
+ tqueue = blk_alloc_queue(dev->q->node);
if (!tqueue) {
ret = -ENOMEM;
goto err_disk;
tdisk->flags = GENHD_FL_EXT_DEVT;
tdisk->major = 0;
tdisk->first_minor = 0;
- tdisk->fops = &nvm_fops;
+ tdisk->fops = tt->bops;
tdisk->queue = tqueue;
targetdata = tt->init(tgt_dev, tdisk, create->flags);
struct bio_set pblk_bio_set;
-static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
+static blk_qc_t pblk_submit_bio(struct bio *bio)
{
- struct pblk *pblk = q->queuedata;
+ struct pblk *pblk = bio->bi_disk->queue->queuedata;
if (bio_op(bio) == REQ_OP_DISCARD) {
pblk_discard(pblk, bio);
return BLK_QC_T_NONE;
}
+static const struct block_device_operations pblk_bops = {
+ .owner = THIS_MODULE,
+ .submit_bio = pblk_submit_bio,
+};
+
static size_t pblk_trans_map_size(struct pblk *pblk)
{
int entry_size = 8;
.name = "pblk",
.version = {1, 0, 0},
- .make_rq = pblk_make_rq,
+ .bops = &pblk_bops,
.capacity = pblk_capacity,
.init = pblk_init,
/* Cached devices - read & write stuff */
-blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct bcache_device *d = bio->bi_disk->private_data;
continue_at(cl, search_free, NULL);
}
-blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
-blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t cached_dev_submit_bio(struct bio *bio);
void bch_flash_dev_request_init(struct bcache_device *d);
-blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t flash_dev_submit_bio(struct bio *bio);
extern struct kmem_cache *bch_search_cache;
return d->ioctl(d, mode, cmd, arg);
}
-static const struct block_device_operations bcache_ops = {
+static const struct block_device_operations bcache_cached_ops = {
+ .submit_bio = cached_dev_submit_bio,
+ .open = open_dev,
+ .release = release_dev,
+ .ioctl = ioctl_dev,
+ .owner = THIS_MODULE,
+};
+
+static const struct block_device_operations bcache_flash_ops = {
+ .submit_bio = flash_dev_submit_bio,
.open = open_dev,
.release = release_dev,
.ioctl = ioctl_dev,
}
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
- sector_t sectors, make_request_fn make_request_fn,
- struct block_device *cached_bdev)
+ sector_t sectors, struct block_device *cached_bdev,
+ const struct block_device_operations *ops)
{
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
d->disk->major = bcache_major;
d->disk->first_minor = idx_to_first_minor(idx);
- d->disk->fops = &bcache_ops;
+ d->disk->fops = ops;
d->disk->private_data = d;
- q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE);
+ q = blk_alloc_queue(NUMA_NO_NODE);
if (!q)
return -ENOMEM;
ret = bcache_device_init(&dc->disk, block_size,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
- cached_dev_make_request, dc->bdev);
+ dc->bdev, &bcache_cached_ops);
if (ret)
return ret;
kobject_init(&d->kobj, &bch_flash_dev_ktype);
if (bcache_device_init(d, block_bytes(c), u->sectors,
- flash_dev_make_request, NULL))
+ NULL, &bcache_flash_ops))
goto err;
bcache_device_attach(d, c, u - c->uuids);
}
/*
- * If in ->make_request_fn we need to use blk_queue_split(), otherwise
+ * If in ->submit_bio we need to use blk_queue_split(), otherwise
* queue_limits for abnormal requests (e.g. discard, writesame, etc)
* won't be imposed.
*/
return __split_and_process_bio(md, map, bio);
}
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
/*
* We are called with a live reference on q_usage_counter, but
* that one will be released as soon as we return. Grab an
- * extra one as blk_mq_make_request expects to be able to
- * consume a reference (which lives until the request is freed
- * in case a request is allocated).
+ * extra one as blk_mq_submit_bio expects to be able to consume
+ * a reference (which lives until the request is freed in case a
+ * request is allocated).
*/
- percpu_ref_get(&q->q_usage_counter);
- return blk_mq_make_request(q, bio);
+ percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
+ return blk_mq_submit_bio(bio);
}
map = dm_get_live_table(md, &srcu_idx);
spin_lock_init(&md->uevent_lock);
/*
- * default to bio-based required ->make_request_fn until DM
- * table is loaded and md->type established. If request-based
- * table is loaded: blk-mq will override accordingly.
+	 * default to bio-based until the DM table is loaded and md->type is
+	 * established. If a request-based table is loaded, blk-mq will
+ * override accordingly.
*/
- md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
+ md->queue = blk_alloc_queue(numa_node_id);
if (!md->queue)
goto bad;
};
static const struct block_device_operations dm_blk_dops = {
+ .submit_bio = dm_submit_bio,
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
const int sgrp = op_stat_group(bio_op(bio));
mddev->hold_active = UNTIL_STOP;
error = -ENOMEM;
- mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE);
+ mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!mddev->queue)
goto abort;
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
+ .submit_bio = md_submit_bio,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
return err;
}
-static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
static const struct block_device_operations nd_blk_fops = {
.owner = THIS_MODULE,
+ .submit_bio = nd_blk_submit_bio,
.revalidate_disk = nvdimm_revalidate_disk,
};
internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
- q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE);
+ q = blk_alloc_queue(NUMA_NO_NODE);
if (!q)
return -ENOMEM;
if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
return ret;
}
-static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = bio->bi_disk->private_data;
static const struct block_device_operations btt_fops = {
.owner = THIS_MODULE,
+ .submit_bio = btt_submit_bio,
.rw_page = btt_rw_page,
.getgeo = btt_getgeo,
.revalidate_disk = nvdimm_revalidate_disk,
struct nd_namespace_common *ndns = nd_btt->ndns;
/* create a new disk and request queue for btt */
- btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE);
+ btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE);
if (!btt->btt_queue)
return -ENOMEM;
return rc;
}
-static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pmem_submit_bio(struct bio *bio)
{
int ret = 0;
blk_status_t rc = 0;
static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
+ .submit_bio = pmem_submit_bio,
.rw_page = pmem_rw_page,
.revalidate_disk = nvdimm_revalidate_disk,
};
return -EBUSY;
}
- q = blk_alloc_queue(pmem_make_request, dev_to_node(dev));
+ q = blk_alloc_queue(dev_to_node(dev));
if (!q)
return -ENOMEM;
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
+ .submit_bio = nvme_ns_head_submit_bio,
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ioctl,
return false;
}
-static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
- struct bio *bio)
+blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
return 0;
- q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node);
+ q = blk_alloc_queue(ctrl->numa_node);
if (!q)
goto out;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
-static blk_qc_t dcssblk_make_request(struct request_queue *q,
- struct bio *bio);
+static blk_qc_t dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
static int dcssblk_major;
static const struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
+ .submit_bio = dcssblk_submit_bio,
.open = dcssblk_open,
.release = dcssblk_release,
};
}
dev_info->gd->major = dcssblk_major;
dev_info->gd->fops = &dcssblk_devops;
- dev_info->dcssblk_queue =
- blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE);
+ dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE);
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
}
static blk_qc_t
-dcssblk_make_request(struct request_queue *q, struct bio *bio)
+dcssblk_submit_bio(struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
struct bio_vec bvec;
/*
- * Block device make request function.
+ * Block device submit_bio function.
*/
-static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t xpram_submit_bio(struct bio *bio)
{
xpram_device_t *xdev = bio->bi_disk->private_data;
struct bio_vec bvec;
static const struct block_device_operations xpram_devops =
{
.owner = THIS_MODULE,
+ .submit_bio = xpram_submit_bio,
.getgeo = xpram_getgeo,
};
xpram_disks[i] = alloc_disk(1);
if (!xpram_disks[i])
goto out;
- xpram_queues[i] = blk_alloc_queue(xpram_make_request,
- NUMA_NO_NODE);
+ xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE);
if (!xpram_queues[i]) {
put_disk(xpram_disks[i]);
goto out;
rq->q->mq_ops->cleanup_rq(rq);
}
-blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t blk_mq_submit_bio(struct bio *bio);
#endif
struct blk_queue_ctx;
-typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-
struct bio_vec;
enum blk_eh_timer_return {
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
- make_request_fn *make_request_fn;
-
const struct blk_mq_ops *mq_ops;
/* sw queues */
extern void blk_dump_rq_flags(struct request *, char *);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id);
+struct request_queue *blk_alloc_queue(int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
struct block_device_operations {
+ blk_qc_t (*submit_bio) (struct bio *bio);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
return last;
}
-typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
int flags);
int flags;
/* target entry points */
- nvm_tgt_make_rq_fn *make_rq;
+ const struct block_device_operations *bops;
nvm_tgt_capacity_fn *capacity;
/* module-specific init/teardown */