// SPDX-License-Identifier: GPL-2.0-only

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>
#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT	0
#else
#define VIRTIO_BLK_INLINE_SG_CNT	2
#endif
static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
		 "Limit the number of request queues to use for blk device. "
		 "0 for no limit. "
		 "Values > nr_cpu_ids truncated to nr_cpu_ids.");
static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
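/* Per-device driver state, allocated in virtblk_probe(). */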
struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not
	 * need to hold the mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};
struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct sg_table sg_table;
	struct scatterlist sg[];
};
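/* Translate the status byte written by the device into a blk-mq status. */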
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
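/*
 * Build the payload of a discard/write-zeroes command: an array of
 * little-endian (sector, num_sectors, flags) ranges, attached to the
 * request as a special payload and later freed in virtblk_cleanup_cmd().
 */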
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and block layer only runs contiguity merge like
	 * normal RW request. So we can't rely on bio for retrieving
	 * each range info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}
static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
	if (blk_rq_nr_phys_segments(req))
		sg_free_table_chained(&vbr->sg_table,
				      VIRTIO_BLK_INLINE_SG_CNT);
}
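/*
 * Map the request's data into a (possibly chained) scatterlist. Returns the
 * number of mapped segments, 0 for requests that carry no data, or a
 * negative errno on allocation failure.
 */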
static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
		struct virtblk_req *vbr)
{
	int err;

	if (!blk_rq_nr_phys_segments(req))
		return 0;

	vbr->sg_table.sgl = vbr->sg;
	err = sg_alloc_table_chained(&vbr->sg_table,
				     blk_rq_nr_phys_segments(req),
				     vbr->sg_table.sgl,
				     VIRTIO_BLK_INLINE_SG_CNT);
	if (unlikely(err))
		return -ENOMEM;

	return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}
static void virtblk_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(bvec_virt(&req->special_vec));
}
static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
				      struct request *req,
				      struct virtblk_req *vbr)
{
	bool unmap = false;
	u32 type;

	vbr->out_hdr.sector = 0;

	switch (req_op(req)) {
	case REQ_OP_READ:
		type = VIRTIO_BLK_T_IN;
		vbr->out_hdr.sector = cpu_to_virtio64(vdev, blk_rq_pos(req));
		break;
	case REQ_OP_WRITE:
		type = VIRTIO_BLK_T_OUT;
		vbr->out_hdr.sector = cpu_to_virtio64(vdev, blk_rq_pos(req));
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
	vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		if (virtblk_setup_discard_write_zeroes(req, unmap))
			return BLK_STS_RESOURCE;
	}

	return 0;
}
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	virtblk_unmap_data(req, vbr);
	virtblk_cleanup_cmd(req);
	blk_mq_end_request(req, virtblk_result(vbr));
}
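/* Virtqueue callback: reap all completed requests and hand them to blk-mq. */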
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}
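/*
 * Submission path: translate the blk-mq request into a virtio-blk command,
 * map its data, and queue it on this hardware context's virtqueue; the
 * device is notified only at the end of a batch (bd->last) or on error.
 */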
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int num;
	int qid = hctx->queue_num;
	bool notify = false;
	blk_status_t status;
	int err;

	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
	if (unlikely(status))
		return status;

	blk_mq_start_request(req);

	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0)) {
		virtblk_cleanup_cmd(req);
		return BLK_STS_RESOURCE;
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		virtblk_unmap_data(req, vbr);
		virtblk_cleanup_cmd(req);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}
/* Return id (s/n) string for *disk to *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_mq_free_request(req);
	return err;
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}
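/* Called when the last reference to the gendisk is dropped. */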
static void virtblk_free_disk(struct gendisk *disk)
{
	struct virtio_blk *vblk = disk->private_data;

	ida_simple_remove(&vd_index_ida, vblk->index);
	mutex_destroy(&vblk->vdev_mutex);
	kfree(vblk);
}
static const struct block_device_operations virtblk_fops = {
	.owner = THIS_MODULE,
	.getgeo = virtblk_getgeo,
	.free_disk = virtblk_free_disk,
};
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_and_notify(vblk->disk, capacity);
}
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}
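/* Config-change interrupt: defer the capacity re-read to process context. */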
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;
	if (!err && !num_vqs) {
		dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
		return -EINVAL;
	}

	num_vqs = min_t(unsigned int,
			min_not_zero(num_request_queues, nr_cpu_ids),
			num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}
/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
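/* The scheme above yields, e.g., index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa". */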
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}
static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}
static DEVICE_ATTR_RW(cache_type);
static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};
static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}
static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
}
static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq = virtio_queue_rq,
	.commit_rqs = virtio_commit_rqs,
	.complete = virtblk_request_done,
	.map_queues = virtblk_map_queues,
};
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
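/*
 * For example (illustrative usage): virtio_blk.queue_depth=128 on the
 * kernel command line. Left at 0, the depth is derived from the virtqueue
 * size in virtblk_probe() below.
 */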
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;
	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}
	q = vblk->disk->queue;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->index = index;
	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, sg_elems);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);
	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err) {
		err = blk_validate_block_size(blk_size);
		if (err) {
			dev_err(&vdev->dev,
				"virtio_blk: invalid block size: 0x%x\n",
				blk_size);
			goto out_cleanup_disk;
		}

		blk_queue_logical_block_size(q, blk_size);
	} else
		blk_size = queue_logical_block_size(q);
	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		if (v)
			q->limits.discard_granularity = v << SECTOR_SHIFT;
		else
			q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);

		/*
		 * max_discard_seg == 0 is out of spec but we always
		 * handled it.
		 */
		if (!v)
			v = sg_elems;
		blk_queue_max_discard_segments(q,
					       min(v, MAX_DISCARD_SEGMENTS));
	}
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	blk_cleanup_disk(vblk->disk);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	put_disk(vblk->disk);
}
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	virtio_reset_device(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
static struct virtio_driver virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = virtblk_remove,
	.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtblk_freeze,
	.restore = virtblk_restore,
#endif
};
static int __init virtio_blk_init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}
static void __exit virtio_blk_fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(virtio_blk_init);
module_exit(virtio_blk_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");