1 // SPDX-License-Identifier: GPL-2.0-only
3 #include <linux/spinlock.h>
4 #include <linux/slab.h>
5 #include <linux/blkdev.h>
6 #include <linux/hdreg.h>
7 #include <linux/module.h>
8 #include <linux/mutex.h>
9 #include <linux/interrupt.h>
10 #include <linux/virtio.h>
11 #include <linux/virtio_blk.h>
12 #include <linux/scatterlist.h>
13 #include <linux/string_helpers.h>
14 #include <linux/idr.h>
15 #include <linux/blk-mq.h>
16 #include <linux/blk-mq-virtio.h>
17 #include <linux/numa.h>
18 #include <linux/vmalloc.h>
19 #include <uapi/linux/virtio_ring.h>
22 #define VQ_NAME_LEN 16
23 #define MAX_DISCARD_SEGMENTS 256u
25 /* The maximum number of sg elements that fit into a virtqueue */
26 #define VIRTIO_BLK_MAX_SG_ELEMS 32768
28 #ifdef CONFIG_ARCH_NO_SG_CHAIN
29 #define VIRTIO_BLK_INLINE_SG_CNT 0
30 #else
31 #define VIRTIO_BLK_INLINE_SG_CNT 2
32 #endif
34 static unsigned int num_request_queues;
35 module_param(num_request_queues, uint, 0644);
36 MODULE_PARM_DESC(num_request_queues,
37 "Limit the number of request queues to use for blk device. "
39 "Values > nr_cpu_ids truncated to nr_cpu_ids.");
41 static unsigned int poll_queues;
42 module_param(poll_queues, uint, 0644);
43 MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");
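/*
 * Both parameters can be given at module load time, for example (illustrative
 * values only): "modprobe virtio_blk num_request_queues=4 poll_queues=2"
 * caps the device at 4 request virtqueues and dedicates 2 of them to
 * polled I/O; see init_vq() for how the split is applied.
 */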
46 static DEFINE_IDA(vd_index_ida);
48 static struct workqueue_struct *virtblk_wq;
50 struct virtio_blk_vq {
53 char name[VQ_NAME_LEN];
54 } ____cacheline_aligned_in_smp;
58 * This mutex must be held by anything that may run after
59 * virtblk_remove() sets vblk->vdev to NULL.
61 * blk-mq, virtqueue processing, and sysfs attribute code paths are
62 * shut down before vblk->vdev is set to NULL and therefore do not need to hold the lock.
65 struct mutex vdev_mutex;
66 struct virtio_device *vdev;
68 /* The disk structure for the kernel. */
71 /* Block layer tags. */
72 struct blk_mq_tag_set tag_set;
74 /* Process context for config space updates */
75 struct work_struct config_work;
77 /* Ida index - used to track minor number allocations. */
82 int io_queues[HCTX_MAX_TYPES];
83 struct virtio_blk_vq *vqs;
85 /* For zoned device */
86 unsigned int zone_sectors;
91 struct virtio_blk_outhdr out_hdr;
98 * The zone append command has an extended in header.
99 * The status field in zone_append_in_hdr must always be the last byte.
110 struct sg_table sg_table;
111 struct scatterlist sg[];
114 static inline blk_status_t virtblk_result(u8 status)
117 case VIRTIO_BLK_S_OK:
119 case VIRTIO_BLK_S_UNSUPP:
120 return BLK_STS_NOTSUPP;
121 case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE:
122 return BLK_STS_ZONE_OPEN_RESOURCE;
123 case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE:
124 return BLK_STS_ZONE_ACTIVE_RESOURCE;
125 case VIRTIO_BLK_S_IOERR:
126 case VIRTIO_BLK_S_ZONE_UNALIGNED_WP:
128 return BLK_STS_IOERR;
132 static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
134 struct virtio_blk *vblk = hctx->queue->queuedata;
135 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
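/*
 * Build the descriptor chain for one request: the out header, then the data
 * scatterlist (device-readable for OUT/write requests, device-writable for
 * IN/read requests), then an in header that carries at least the status byte.
 */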
140 static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
142 struct scatterlist out_hdr, in_hdr, *sgs[3];
143 unsigned int num_out = 0, num_in = 0;
145 sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
146 sgs[num_out++] = &out_hdr;
148 if (vbr->sg_table.nents) {
149 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
150 sgs[num_out++] = vbr->sg_table.sgl;
152 sgs[num_out + num_in++] = vbr->sg_table.sgl;
155 sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
156 sgs[num_out + num_in++] = &in_hdr;
158 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
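/*
 * Turn a discard, write-zeroes or secure-erase request into the array of
 * virtio_blk_discard_write_zeroes ranges the device expects, attached to the
 * request as a special payload; the unmap argument selects the WRITE_ZEROES
 * unmap flag.
 */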
161 static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
163 unsigned short segments = blk_rq_nr_discard_segments(req);
164 unsigned short n = 0;
165 struct virtio_blk_discard_write_zeroes *range;
170 flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;
172 range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
177 * Single max discard segment means multi-range discard isn't
178 * supported, and block layer only runs contiguity merge like
179 * normal RW request. So we can't rely on the bios for retrieving each range's info.
182 if (queue_max_discard_segments(req->q) == 1) {
183 range[0].flags = cpu_to_le32(flags);
184 range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
185 range[0].sector = cpu_to_le64(blk_rq_pos(req));
188 __rq_for_each_bio(bio, req) {
189 u64 sector = bio->bi_iter.bi_sector;
190 u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
192 range[n].flags = cpu_to_le32(flags);
193 range[n].num_sectors = cpu_to_le32(num_sectors);
194 range[n].sector = cpu_to_le64(sector);
199 WARN_ON_ONCE(n != segments);
201 bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
202 req->rq_flags |= RQF_SPECIAL_PAYLOAD;
207 static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
209 if (blk_rq_nr_phys_segments(req))
210 sg_free_table_chained(&vbr->sg_table,
211 VIRTIO_BLK_INLINE_SG_CNT);
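/*
 * Map the request's data into vbr->sg_table, reusing the inline scatterlist
 * entries embedded in the request PDU where possible. Returns the number of
 * mapped segments, 0 for requests without data, or -ENOMEM on failure.
 */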
214 static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
215 struct virtblk_req *vbr)
219 if (!blk_rq_nr_phys_segments(req))
222 vbr->sg_table.sgl = vbr->sg;
223 err = sg_alloc_table_chained(&vbr->sg_table,
224 blk_rq_nr_phys_segments(req),
226 VIRTIO_BLK_INLINE_SG_CNT);
230 return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
233 static void virtblk_cleanup_cmd(struct request *req)
235 if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
236 kfree(bvec_virt(&req->special_vec));
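/*
 * Translate a block layer request into a virtio-blk command: choose the
 * request type and start sector, size the in header (zone append uses the
 * extended form), and build the range payload for discard, write-zeroes
 * and secure-erase requests.
 */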
239 static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
241 struct virtblk_req *vbr)
243 size_t in_hdr_len = sizeof(vbr->in_hdr.status);
248 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
249 return BLK_STS_NOTSUPP;
251 /* Set fields for all request types */
252 vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
254 switch (req_op(req)) {
256 type = VIRTIO_BLK_T_IN;
257 sector = blk_rq_pos(req);
260 type = VIRTIO_BLK_T_OUT;
261 sector = blk_rq_pos(req);
264 type = VIRTIO_BLK_T_FLUSH;
267 type = VIRTIO_BLK_T_DISCARD;
269 case REQ_OP_WRITE_ZEROES:
270 type = VIRTIO_BLK_T_WRITE_ZEROES;
271 unmap = !(req->cmd_flags & REQ_NOUNMAP);
273 case REQ_OP_SECURE_ERASE:
274 type = VIRTIO_BLK_T_SECURE_ERASE;
276 case REQ_OP_ZONE_OPEN:
277 type = VIRTIO_BLK_T_ZONE_OPEN;
278 sector = blk_rq_pos(req);
280 case REQ_OP_ZONE_CLOSE:
281 type = VIRTIO_BLK_T_ZONE_CLOSE;
282 sector = blk_rq_pos(req);
284 case REQ_OP_ZONE_FINISH:
285 type = VIRTIO_BLK_T_ZONE_FINISH;
286 sector = blk_rq_pos(req);
288 case REQ_OP_ZONE_APPEND:
289 type = VIRTIO_BLK_T_ZONE_APPEND;
290 sector = blk_rq_pos(req);
291 in_hdr_len = sizeof(vbr->in_hdr.zone_append);
293 case REQ_OP_ZONE_RESET:
294 type = VIRTIO_BLK_T_ZONE_RESET;
295 sector = blk_rq_pos(req);
297 case REQ_OP_ZONE_RESET_ALL:
298 type = VIRTIO_BLK_T_ZONE_RESET_ALL;
302 * Out header has already been prepared by the caller (virtblk_get_id()
303 * or virtblk_submit_zone_report()), nothing to do here.
308 return BLK_STS_IOERR;
311 /* Set fields for non-REQ_OP_DRV_IN request types */
312 vbr->in_hdr_len = in_hdr_len;
313 vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
314 vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector);
316 if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
317 type == VIRTIO_BLK_T_SECURE_ERASE) {
318 if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
319 return BLK_STS_RESOURCE;
326 * The status byte is always the last byte of the virtblk request
327 * in-header. This helper fetches its value for all in-header formats
328 * that are currently defined.
330 static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
332 return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
335 static inline void virtblk_request_done(struct request *req)
337 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
338 blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
339 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
341 virtblk_unmap_data(req, vbr);
342 virtblk_cleanup_cmd(req);
344 if (req_op(req) == REQ_OP_ZONE_APPEND)
345 req->__sector = virtio64_to_cpu(vblk->vdev,
346 vbr->in_hdr.zone_append.sector);
348 blk_mq_end_request(req, status);
351 static void virtblk_complete_batch(struct io_comp_batch *iob)
355 rq_list_for_each(&iob->req_list, req) {
356 virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
357 virtblk_cleanup_cmd(req);
359 blk_mq_end_request_batch(iob);
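/*
 * Reap completed requests from one virtqueue, finishing each request
 * remotely, as part of the completion batch, or directly. Returns the
 * number of requests handled.
 */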
362 static int virtblk_handle_req(struct virtio_blk_vq *vq,
363 struct io_comp_batch *iob)
365 struct virtblk_req *vbr;
369 while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
370 struct request *req = blk_mq_rq_from_pdu(vbr);
372 if (likely(!blk_should_fake_timeout(req->q)) &&
373 !blk_mq_complete_request_remote(req) &&
374 !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
375 virtblk_complete_batch))
376 virtblk_request_done(req);
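/* Interrupt callback for a request virtqueue. */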
383 static void virtblk_done(struct virtqueue *vq)
385 struct virtio_blk *vblk = vq->vdev->priv;
386 struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index];
389 DEFINE_IO_COMP_BATCH(iob);
391 spin_lock_irqsave(&vblk_vq->lock, flags);
393 virtqueue_disable_cb(vq);
394 req_done += virtblk_handle_req(vblk_vq, &iob);
396 if (unlikely(virtqueue_is_broken(vq)))
398 } while (!virtqueue_enable_cb(vq));
401 if (!rq_list_empty(iob.req_list))
404 /* In case queue is stopped waiting for more buffers. */
405 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
407 spin_unlock_irqrestore(&vblk_vq->lock, flags);
410 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
412 struct virtio_blk *vblk = hctx->queue->queuedata;
413 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
416 spin_lock_irq(&vq->lock);
417 kick = virtqueue_kick_prepare(vq->vq);
418 spin_unlock_irq(&vq->lock);
421 virtqueue_notify(vq->vq);
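/* Undo command setup and map a queueing error to a blk_status_t. */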
424 static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
426 virtblk_cleanup_cmd(req);
429 return BLK_STS_DEV_RESOURCE;
431 return BLK_STS_RESOURCE;
433 return BLK_STS_IOERR;
437 static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
438 struct virtio_blk *vblk,
440 struct virtblk_req *vbr)
445 status = virtblk_setup_cmd(vblk->vdev, req, vbr);
446 if (unlikely(status))
449 num = virtblk_map_data(hctx, req, vbr);
450 if (unlikely(num < 0))
451 return virtblk_fail_to_queue(req, -ENOMEM);
452 vbr->sg_table.nents = num;
454 blk_mq_start_request(req);
459 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
460 const struct blk_mq_queue_data *bd)
462 struct virtio_blk *vblk = hctx->queue->queuedata;
463 struct request *req = bd->rq;
464 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
466 int qid = hctx->queue_num;
471 status = virtblk_prep_rq(hctx, vblk, req, vbr);
472 if (unlikely(status))
475 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
476 err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
478 virtqueue_kick(vblk->vqs[qid].vq);
479 /* Don't stop the queue if -ENOMEM: we may have failed to
480 * bounce the buffer due to global resource outage.
481 */
482 if (err == -ENOSPC)
483 blk_mq_stop_hw_queue(hctx);
484 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
485 virtblk_unmap_data(req, vbr);
486 return virtblk_fail_to_queue(req, err);
489 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
491 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
494 virtqueue_notify(vblk->vqs[qid].vq);
498 static bool virtblk_prep_rq_batch(struct request *req)
500 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
501 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
503 req->mq_hctx->tags->rqs[req->tag] = req;
505 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
508 static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
509 struct request **rqlist)
515 spin_lock_irqsave(&vq->lock, flags);
517 while (!rq_list_empty(*rqlist)) {
518 struct request *req = rq_list_pop(rqlist);
519 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
521 err = virtblk_add_req(vq->vq, vbr);
523 virtblk_unmap_data(req, vbr);
524 virtblk_cleanup_cmd(req);
525 blk_mq_requeue_request(req, true);
529 kick = virtqueue_kick_prepare(vq->vq);
530 spin_unlock_irqrestore(&vq->lock, flags);
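/*
 * ->queue_rqs: submit a list of already-prepared requests, adding them to
 * their virtqueues per hardware context and kicking each virtqueue at most
 * once; requests that fail preparation are moved to a requeue list.
 */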
535 static void virtio_queue_rqs(struct request **rqlist)
537 struct request *req, *next, *prev = NULL;
538 struct request *requeue_list = NULL;
540 rq_list_for_each_safe(rqlist, req, next) {
541 struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
544 if (!virtblk_prep_rq_batch(req)) {
545 rq_list_move(rqlist, &requeue_list, req, prev);
551 if (!next || req->mq_hctx != next->mq_hctx) {
553 kick = virtblk_add_req_batch(vq, rqlist);
555 virtqueue_notify(vq->vq);
563 *rqlist = requeue_list;
566 #ifdef CONFIG_BLK_DEV_ZONED
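/*
 * Allocate a zone report buffer sized for the requested number of zones,
 * capped by the disk capacity and the queue's max_hw_sectors/max_segments
 * limits, retrying with smaller sizes if vmalloc fails.
 */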
567 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
568 unsigned int nr_zones,
571 struct request_queue *q = vblk->disk->queue;
575 nr_zones = min_t(unsigned int, nr_zones,
576 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
578 bufsize = sizeof(struct virtio_blk_zone_report) +
579 nr_zones * sizeof(struct virtio_blk_zone_descriptor);
580 bufsize = min_t(size_t, bufsize,
581 queue_max_hw_sectors(q) << SECTOR_SHIFT);
582 bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
584 while (bufsize >= sizeof(struct virtio_blk_zone_report)) {
585 buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
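/*
 * Issue a VIRTIO_BLK_T_ZONE_REPORT driver request for the zones starting at
 * the given sector and read the result back into report_buf.
 */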
596 static int virtblk_submit_zone_report(struct virtio_blk *vblk,
597 char *report_buf, size_t report_len,
600 struct request_queue *q = vblk->disk->queue;
602 struct virtblk_req *vbr;
605 req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
609 vbr = blk_mq_rq_to_pdu(req);
610 vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
611 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
612 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
614 err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
618 blk_execute_rq(req, false);
619 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
621 blk_mq_free_request(req);
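/* Convert one device zone descriptor into a blk_zone and pass it to the report_zones callback. */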
625 static int virtblk_parse_zone(struct virtio_blk *vblk,
626 struct virtio_blk_zone_descriptor *entry,
627 unsigned int idx, report_zones_cb cb, void *data)
629 struct blk_zone zone = { };
631 zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
632 if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
633 zone.len = vblk->zone_sectors;
635 zone.len = get_capacity(vblk->disk) - zone.start;
636 zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
637 zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
639 switch (entry->z_type) {
640 case VIRTIO_BLK_ZT_SWR:
641 zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
643 case VIRTIO_BLK_ZT_SWP:
644 zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
646 case VIRTIO_BLK_ZT_CONV:
647 zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
650 dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
651 zone.start, entry->z_type);
655 switch (entry->z_state) {
656 case VIRTIO_BLK_ZS_EMPTY:
657 zone.cond = BLK_ZONE_COND_EMPTY;
659 case VIRTIO_BLK_ZS_CLOSED:
660 zone.cond = BLK_ZONE_COND_CLOSED;
662 case VIRTIO_BLK_ZS_FULL:
663 zone.cond = BLK_ZONE_COND_FULL;
664 zone.wp = zone.start + zone.len;
666 case VIRTIO_BLK_ZS_EOPEN:
667 zone.cond = BLK_ZONE_COND_EXP_OPEN;
669 case VIRTIO_BLK_ZS_IOPEN:
670 zone.cond = BLK_ZONE_COND_IMP_OPEN;
672 case VIRTIO_BLK_ZS_NOT_WP:
673 zone.cond = BLK_ZONE_COND_NOT_WP;
675 case VIRTIO_BLK_ZS_RDONLY:
676 zone.cond = BLK_ZONE_COND_READONLY;
679 case VIRTIO_BLK_ZS_OFFLINE:
680 zone.cond = BLK_ZONE_COND_OFFLINE;
684 dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
685 zone.start, entry->z_state);
690 * The callback below checks the validity of the reported
691 * entry data, no need to further validate it here.
693 return cb(&zone, idx, data);
696 static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
697 unsigned int nr_zones, report_zones_cb cb,
700 struct virtio_blk *vblk = disk->private_data;
701 struct virtio_blk_zone_report *report;
702 unsigned long long nz, i;
704 unsigned int zone_idx = 0;
707 if (WARN_ON_ONCE(!vblk->zone_sectors))
710 report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
714 mutex_lock(&vblk->vdev_mutex);
721 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
722 memset(report, 0, buflen);
724 ret = virtblk_submit_zone_report(vblk, (char *)report,
729 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
734 for (i = 0; i < nz && zone_idx < nr_zones; i++) {
735 ret = virtblk_parse_zone(vblk, &report->zones[i],
740 sector = virtio64_to_cpu(vblk->vdev,
741 report->zones[i].z_start) +
752 mutex_unlock(&vblk->vdev_mutex);
757 static void virtblk_revalidate_zones(struct virtio_blk *vblk)
761 virtio_cread(vblk->vdev, struct virtio_blk_config,
762 zoned.model, &model);
765 dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
767 case VIRTIO_BLK_Z_NONE:
768 case VIRTIO_BLK_Z_HA:
769 disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
771 case VIRTIO_BLK_Z_HM:
772 WARN_ON_ONCE(!vblk->zone_sectors);
773 if (!blk_revalidate_disk_zones(vblk->disk, NULL))
774 set_capacity_and_notify(vblk->disk, 0);
778 static int virtblk_probe_zoned_device(struct virtio_device *vdev,
779 struct virtio_blk *vblk,
780 struct request_queue *q)
786 virtio_cread(vdev, struct virtio_blk_config,
787 zoned.model, &model);
790 case VIRTIO_BLK_Z_NONE:
791 case VIRTIO_BLK_Z_HA:
792 /* Present the host-aware device as non-zoned */
794 case VIRTIO_BLK_Z_HM:
797 dev_err(&vdev->dev, "unsupported zone model %d\n", model);
801 dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
803 disk_set_zoned(vblk->disk, BLK_ZONED_HM);
804 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
806 virtio_cread(vdev, struct virtio_blk_config,
807 zoned.max_open_zones, &v);
808 disk_set_max_open_zones(vblk->disk, v);
809 dev_dbg(&vdev->dev, "max open zones = %u\n", v);
811 virtio_cread(vdev, struct virtio_blk_config,
812 zoned.max_active_zones, &v);
813 disk_set_max_active_zones(vblk->disk, v);
814 dev_dbg(&vdev->dev, "max active zones = %u\n", v);
816 virtio_cread(vdev, struct virtio_blk_config,
817 zoned.write_granularity, &wg);
819 dev_warn(&vdev->dev, "zero write granularity reported\n");
822 blk_queue_physical_block_size(q, wg);
823 blk_queue_io_min(q, wg);
825 dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
828 * virtio ZBD specification doesn't require zones to be a power of
829 * two sectors in size, but the code in this driver expects that.
831 virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
832 &vblk->zone_sectors);
833 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
835 "zoned device with non power of two zone size %u\n",
839 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
841 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
842 dev_warn(&vblk->vdev->dev,
843 "ignoring negotiated F_DISCARD for zoned device\n");
844 blk_queue_max_discard_sectors(q, 0);
847 ret = blk_revalidate_disk_zones(vblk->disk, NULL);
849 virtio_cread(vdev, struct virtio_blk_config,
850 zoned.max_append_sectors, &v);
852 dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
855 if ((v << SECTOR_SHIFT) < wg) {
857 "write granularity %u exceeds max_append_sectors %u limit\n",
862 blk_queue_max_zone_append_sectors(q, v);
863 dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
872 * Zoned block device support is not configured in this kernel.
873 * Host-managed zoned devices can't be supported, but others are
874 * good to go as regular block devices.
876 #define virtblk_report_zones NULL
878 static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
882 static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
883 struct virtio_blk *vblk, struct request_queue *q)
887 virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
888 if (model == VIRTIO_BLK_Z_HM) {
890 "virtio_blk: zoned devices are not supported");
896 #endif /* CONFIG_BLK_DEV_ZONED */
898 /* return id (s/n) string for *disk to *id_str */
900 static int virtblk_get_id(struct gendisk *disk, char *id_str)
902 struct virtio_blk *vblk = disk->private_data;
903 struct request_queue *q = vblk->disk->queue;
905 struct virtblk_req *vbr;
908 req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
912 vbr = blk_mq_rq_to_pdu(req);
913 vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
914 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
915 vbr->out_hdr.sector = 0;
917 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
921 blk_execute_rq(req, false);
922 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
924 blk_mq_free_request(req);
928 /* We provide getgeo only to please some old bootloader/partitioning tools */
929 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
931 struct virtio_blk *vblk = bd->bd_disk->private_data;
934 mutex_lock(&vblk->vdev_mutex);
941 /* see if the host passed in geometry config */
942 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
943 virtio_cread(vblk->vdev, struct virtio_blk_config,
944 geometry.cylinders, &geo->cylinders);
945 virtio_cread(vblk->vdev, struct virtio_blk_config,
946 geometry.heads, &geo->heads);
947 virtio_cread(vblk->vdev, struct virtio_blk_config,
948 geometry.sectors, &geo->sectors);
950 /* some standard values, similar to sd */
952 geo->sectors = 1 << 5;
953 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
956 mutex_unlock(&vblk->vdev_mutex);
960 static void virtblk_free_disk(struct gendisk *disk)
962 struct virtio_blk *vblk = disk->private_data;
964 ida_free(&vd_index_ida, vblk->index);
965 mutex_destroy(&vblk->vdev_mutex);
969 static const struct block_device_operations virtblk_fops = {
970 .owner = THIS_MODULE,
971 .getgeo = virtblk_getgeo,
972 .free_disk = virtblk_free_disk,
973 .report_zones = virtblk_report_zones,
976 static int index_to_minor(int index)
978 return index << PART_BITS;
981 static int minor_to_index(int minor)
983 return minor >> PART_BITS;
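/*
 * sysfs "serial" attribute: report the device ID string obtained with
 * VIRTIO_BLK_T_GET_ID, or an empty string if the device does not support it.
 */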
986 static ssize_t serial_show(struct device *dev,
987 struct device_attribute *attr, char *buf)
989 struct gendisk *disk = dev_to_disk(dev);
992 /* sysfs gives us a PAGE_SIZE buffer */
993 BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
995 buf[VIRTIO_BLK_ID_BYTES] = '\0';
996 err = virtblk_get_id(disk, buf);
1000 if (err == -EIO) /* Unsupported? Make it empty. */
1006 static DEVICE_ATTR_RO(serial);
1008 /* The queue's logical block size must be set before calling this */
1009 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
1011 struct virtio_device *vdev = vblk->vdev;
1012 struct request_queue *q = vblk->disk->queue;
1013 char cap_str_2[10], cap_str_10[10];
1014 unsigned long long nblocks;
1017 /* Host must always specify the capacity. */
1018 virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
1020 nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
1022 string_get_size(nblocks, queue_logical_block_size(q),
1023 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
1024 string_get_size(nblocks, queue_logical_block_size(q),
1025 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
1027 dev_notice(&vdev->dev,
1028 "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
1029 vblk->disk->disk_name,
1030 resize ? "new size: " : "",
1032 queue_logical_block_size(q),
1036 set_capacity_and_notify(vblk->disk, capacity);
1039 static void virtblk_config_changed_work(struct work_struct *work)
1041 struct virtio_blk *vblk =
1042 container_of(work, struct virtio_blk, config_work);
1044 virtblk_revalidate_zones(vblk);
1045 virtblk_update_capacity(vblk, true);
1048 static void virtblk_config_changed(struct virtio_device *vdev)
1050 struct virtio_blk *vblk = vdev->priv;
1052 queue_work(virtblk_wq, &vblk->config_work);
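/*
 * Negotiate the number of request virtqueues (VIRTIO_BLK_F_MQ), clamp it by
 * nr_cpu_ids and the num_request_queues parameter, reserve the trailing
 * poll_queues virtqueues for polled I/O, and find all virtqueues.
 */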
1055 static int init_vq(struct virtio_blk *vblk)
1059 vq_callback_t **callbacks;
1061 struct virtqueue **vqs;
1062 unsigned short num_vqs;
1063 unsigned int num_poll_vqs;
1064 struct virtio_device *vdev = vblk->vdev;
1065 struct irq_affinity desc = { 0, };
1067 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
1068 struct virtio_blk_config, num_queues,
1073 if (!err && !num_vqs) {
1074 dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
1078 num_vqs = min_t(unsigned int,
1079 min_not_zero(num_request_queues, nr_cpu_ids),
1082 num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
1084 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
1085 vblk->io_queues[HCTX_TYPE_READ] = 0;
1086 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
1088 dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
1089 vblk->io_queues[HCTX_TYPE_DEFAULT],
1090 vblk->io_queues[HCTX_TYPE_READ],
1091 vblk->io_queues[HCTX_TYPE_POLL]);
1093 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
1097 names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
1098 callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
1099 vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
1100 if (!names || !callbacks || !vqs) {
1105 for (i = 0; i < num_vqs - num_poll_vqs; i++) {
1106 callbacks[i] = virtblk_done;
1107 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
1108 names[i] = vblk->vqs[i].name;
1111 for (; i < num_vqs; i++) {
1112 callbacks[i] = NULL;
1113 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
1114 names[i] = vblk->vqs[i].name;
1117 /* Discover virtqueues and write information to configuration. */
1118 err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
1122 for (i = 0; i < num_vqs; i++) {
1123 spin_lock_init(&vblk->vqs[i].lock);
1124 vblk->vqs[i].vq = vqs[i];
1126 vblk->num_vqs = num_vqs;
1138 * Legacy naming scheme used for virtio devices. We are stuck with it for
1139 * virtio blk but don't ever use it for any new driver.
1141 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
1143 const int base = 'z' - 'a' + 1;
1144 char *begin = buf + strlen(prefix);
1145 char *end = buf + buflen;
1155 *--p = 'a' + (index % unit);
1156 index = (index / unit) - 1;
1157 } while (index >= 0);
1159 memmove(begin, p, end - p);
1160 memcpy(buf, prefix, strlen(prefix));
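/*
 * Decide whether the device has a writeback cache: use the config-space wce
 * field when VIRTIO_BLK_F_CONFIG_WCE is negotiated, otherwise infer it from
 * the presence of VIRTIO_BLK_F_FLUSH.
 */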
1165 static int virtblk_get_cache_mode(struct virtio_device *vdev)
1170 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
1171 struct virtio_blk_config, wce,
1175 * If WCE is not configurable and flush is not available,
1176 * assume no writeback cache is in use.
1179 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
1184 static void virtblk_update_cache_mode(struct virtio_device *vdev)
1186 u8 writeback = virtblk_get_cache_mode(vdev);
1187 struct virtio_blk *vblk = vdev->priv;
1189 blk_queue_write_cache(vblk->disk->queue, writeback, false);
1192 static const char *const virtblk_cache_types[] = {
1193 "write through", "write back"
1197 cache_type_store(struct device *dev, struct device_attribute *attr,
1198 const char *buf, size_t count)
1200 struct gendisk *disk = dev_to_disk(dev);
1201 struct virtio_blk *vblk = disk->private_data;
1202 struct virtio_device *vdev = vblk->vdev;
1205 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
1206 i = sysfs_match_string(virtblk_cache_types, buf);
1210 virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
1211 virtblk_update_cache_mode(vdev);
1216 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1218 struct gendisk *disk = dev_to_disk(dev);
1219 struct virtio_blk *vblk = disk->private_data;
1220 u8 writeback = virtblk_get_cache_mode(vblk->vdev);
1222 BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
1223 return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
1226 static DEVICE_ATTR_RW(cache_type);
1228 static struct attribute *virtblk_attrs[] = {
1229 &dev_attr_serial.attr,
1230 &dev_attr_cache_type.attr,
1234 static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
1235 struct attribute *a, int n)
1237 struct device *dev = kobj_to_dev(kobj);
1238 struct gendisk *disk = dev_to_disk(dev);
1239 struct virtio_blk *vblk = disk->private_data;
1240 struct virtio_device *vdev = vblk->vdev;
1242 if (a == &dev_attr_cache_type.attr &&
1243 !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
1249 static const struct attribute_group virtblk_attr_group = {
1250 .attrs = virtblk_attrs,
1251 .is_visible = virtblk_attrs_are_visible,
1254 static const struct attribute_group *virtblk_attr_groups[] = {
1255 &virtblk_attr_group,
1259 static void virtblk_map_queues(struct blk_mq_tag_set *set)
1261 struct virtio_blk *vblk = set->driver_data;
1264 for (i = 0, qoff = 0; i < set->nr_maps; i++) {
1265 struct blk_mq_queue_map *map = &set->map[i];
1267 map->nr_queues = vblk->io_queues[i];
1268 map->queue_offset = qoff;
1269 qoff += map->nr_queues;
1271 if (map->nr_queues == 0)
1275 * Regular queues have interrupts and hence CPU affinity is
1276 * defined by the core virtio code, but polling queues have
1277 * no interrupts so we let the block layer assign CPU affinity.
1279 if (i == HCTX_TYPE_POLL)
1280 blk_mq_map_queues(&set->map[i]);
1282 blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
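/* ->poll: reap completions from a dedicated poll virtqueue without interrupts. */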
1286 static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1288 struct virtio_blk *vblk = hctx->queue->queuedata;
1289 struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
1290 unsigned long flags;
1293 spin_lock_irqsave(&vq->lock, flags);
1294 found = virtblk_handle_req(vq, iob);
1297 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
1299 spin_unlock_irqrestore(&vq->lock, flags);
1304 static const struct blk_mq_ops virtio_mq_ops = {
1305 .queue_rq = virtio_queue_rq,
1306 .queue_rqs = virtio_queue_rqs,
1307 .commit_rqs = virtio_commit_rqs,
1308 .complete = virtblk_request_done,
1309 .map_queues = virtblk_map_queues,
1310 .poll = virtblk_poll,
1313 static unsigned int virtblk_queue_depth;
1314 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
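/*
 * Probe one virtio-blk device: allocate a device index and blk-mq tag set,
 * create the gendisk, apply the limits advertised in the config space
 * (segments, block size, topology, discard, write zeroes, secure erase,
 * zoned), then register the disk.
 */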
1316 static int virtblk_probe(struct virtio_device *vdev)
1318 struct virtio_blk *vblk;
1319 struct request_queue *q;
1322 u32 v, blk_size, max_size, sg_elems, opt_io_size;
1323 u32 max_discard_segs = 0;
1324 u32 discard_granularity = 0;
1326 u8 physical_block_exp, alignment_offset;
1327 unsigned int queue_depth;
1329 if (!vdev->config->get) {
1330 dev_err(&vdev->dev, "%s failure: config access disabled\n",
1335 err = ida_alloc_range(&vd_index_ida, 0,
1336 minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
1341 /* We need to know how many segments before we allocate. */
1342 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
1343 struct virtio_blk_config, seg_max,
1346 /* We need at least one SG element, whatever they say. */
1347 if (err || !sg_elems)
1350 /* Prevent integer overflows and honor max vq size */
1351 sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
1353 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
1356 goto out_free_index;
1359 mutex_init(&vblk->vdev_mutex);
1363 INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
1365 err = init_vq(vblk);
1369 /* Default queue sizing is to fill the ring. */
1370 if (!virtblk_queue_depth) {
1371 queue_depth = vblk->vqs[0].vq->num_free;
1372 /* ... but without indirect descs, we use 2 descs per req */
1373 if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
1376 queue_depth = virtblk_queue_depth;
1379 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
1380 vblk->tag_set.ops = &virtio_mq_ops;
1381 vblk->tag_set.queue_depth = queue_depth;
1382 vblk->tag_set.numa_node = NUMA_NO_NODE;
1383 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1384 vblk->tag_set.cmd_size =
1385 sizeof(struct virtblk_req) +
1386 sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
1387 vblk->tag_set.driver_data = vblk;
1388 vblk->tag_set.nr_hw_queues = vblk->num_vqs;
1389 vblk->tag_set.nr_maps = 1;
1390 if (vblk->io_queues[HCTX_TYPE_POLL])
1391 vblk->tag_set.nr_maps = 3;
1393 err = blk_mq_alloc_tag_set(&vblk->tag_set);
1397 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
1398 if (IS_ERR(vblk->disk)) {
1399 err = PTR_ERR(vblk->disk);
1402 q = vblk->disk->queue;
1404 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
1406 vblk->disk->major = major;
1407 vblk->disk->first_minor = index_to_minor(index);
1408 vblk->disk->minors = 1 << PART_BITS;
1409 vblk->disk->private_data = vblk;
1410 vblk->disk->fops = &virtblk_fops;
1411 vblk->index = index;
1413 /* configure queue flush support */
1414 virtblk_update_cache_mode(vdev);
1416 /* If disk is read-only in the host, the guest should obey */
1417 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
1418 set_disk_ro(vblk->disk, 1);
1420 /* We can handle whatever the host told us to handle. */
1421 blk_queue_max_segments(q, sg_elems);
1423 /* No real sector limit. */
1424 blk_queue_max_hw_sectors(q, UINT_MAX);
1426 max_size = virtio_max_dma_size(vdev);
1428 /* Host can optionally specify maximum segment size and number of segments. */
1430 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
1431 struct virtio_blk_config, size_max, &v);
1433 max_size = min(max_size, v);
1435 blk_queue_max_segment_size(q, max_size);
1437 /* Host can optionally specify the block size of the device */
1438 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
1439 struct virtio_blk_config, blk_size,
1442 err = blk_validate_block_size(blk_size);
1445 "virtio_blk: invalid block size: 0x%x\n",
1447 goto out_cleanup_disk;
1450 blk_queue_logical_block_size(q, blk_size);
1452 blk_size = queue_logical_block_size(q);
1454 /* Use topology information if available */
1455 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1456 struct virtio_blk_config, physical_block_exp,
1457 &physical_block_exp);
1458 if (!err && physical_block_exp)
1459 blk_queue_physical_block_size(q,
1460 blk_size * (1 << physical_block_exp));
1462 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1463 struct virtio_blk_config, alignment_offset,
1465 if (!err && alignment_offset)
1466 blk_queue_alignment_offset(q, blk_size * alignment_offset);
1468 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1469 struct virtio_blk_config, min_io_size,
1471 if (!err && min_io_size)
1472 blk_queue_io_min(q, blk_size * min_io_size);
1474 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1475 struct virtio_blk_config, opt_io_size,
1477 if (!err && opt_io_size)
1478 blk_queue_io_opt(q, blk_size * opt_io_size);
1480 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
1481 virtio_cread(vdev, struct virtio_blk_config,
1482 discard_sector_alignment, &discard_granularity);
1484 virtio_cread(vdev, struct virtio_blk_config,
1485 max_discard_sectors, &v);
1486 blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
1488 virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
1492 if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
1493 virtio_cread(vdev, struct virtio_blk_config,
1494 max_write_zeroes_sectors, &v);
1495 blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
1498 /* The discard and secure erase limits are combined since the Linux
1499 * block layer uses the same limit for both commands.
1501 * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
1502 * are negotiated, we will use the minimum between the limits.
1504 * discard sector alignment is set to the minimum between discard_sector_alignment
1505 * and secure_erase_sector_alignment.
1507 * max discard segments is set to the minimum between max_discard_seg and
1508 * max_secure_erase_seg.
1510 if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
1512 virtio_cread(vdev, struct virtio_blk_config,
1513 secure_erase_sector_alignment, &v);
1515 /* secure_erase_sector_alignment should not be zero; the device should set a
1516 * valid number of sectors.
1520 "virtio_blk: secure_erase_sector_alignment can't be 0\n");
1522 goto out_cleanup_disk;
1525 discard_granularity = min_not_zero(discard_granularity, v);
1527 virtio_cread(vdev, struct virtio_blk_config,
1528 max_secure_erase_sectors, &v);
1530 /* max_secure_erase_sectors should not be zero; the device should set a
1531 * valid number of sectors.
1535 "virtio_blk: max_secure_erase_sectors can't be 0\n");
1537 goto out_cleanup_disk;
1540 blk_queue_max_secure_erase_sectors(q, v);
1542 virtio_cread(vdev, struct virtio_blk_config,
1543 max_secure_erase_seg, &v);
1545 /* max_secure_erase_seg should not be zero; the device should set a
1546 * valid number of segments.
1550 "virtio_blk: max_secure_erase_seg can't be 0\n");
1552 goto out_cleanup_disk;
1555 max_discard_segs = min_not_zero(max_discard_segs, v);
1558 if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
1559 virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
1560 /* max_discard_seg and discard_granularity will be 0 only
1561 * if max_discard_seg and discard_sector_alignment fields in the virtio
1562 * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated.
1563 * In this case, we use default values.
1565 if (!max_discard_segs)
1566 max_discard_segs = sg_elems;
1568 blk_queue_max_discard_segments(q,
1569 min(max_discard_segs, MAX_DISCARD_SEGMENTS));
1571 if (discard_granularity)
1572 q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
1574 q->limits.discard_granularity = blk_size;
1577 virtblk_update_capacity(vblk, false);
1578 virtio_device_ready(vdev);
1581 * All steps that follow use the VQs, so they need to be
1582 * placed after the virtio_device_ready() call above.
1584 if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
1585 err = virtblk_probe_zoned_device(vdev, vblk, q);
1587 goto out_cleanup_disk;
1590 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
1592 goto out_cleanup_disk;
1597 put_disk(vblk->disk);
1599 blk_mq_free_tag_set(&vblk->tag_set);
1601 vdev->config->del_vqs(vdev);
1606 ida_free(&vd_index_ida, index);
1611 static void virtblk_remove(struct virtio_device *vdev)
1613 struct virtio_blk *vblk = vdev->priv;
1615 /* Make sure no work handler is accessing the device. */
1616 flush_work(&vblk->config_work);
1618 del_gendisk(vblk->disk);
1619 blk_mq_free_tag_set(&vblk->tag_set);
1621 mutex_lock(&vblk->vdev_mutex);
1623 /* Stop all the virtqueues. */
1624 virtio_reset_device(vdev);
1626 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
1629 vdev->config->del_vqs(vdev);
1632 mutex_unlock(&vblk->vdev_mutex);
1634 put_disk(vblk->disk);
1637 #ifdef CONFIG_PM_SLEEP
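/*
 * Suspend/resume: freeze() resets the device and deletes the virtqueues
 * after quiescing I/O; restore() recreates them and unquiesces the queue.
 */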
1638 static int virtblk_freeze(struct virtio_device *vdev)
1640 struct virtio_blk *vblk = vdev->priv;
1642 /* Ensure we don't receive any more interrupts */
1643 virtio_reset_device(vdev);
1645 /* Make sure no work handler is accessing the device. */
1646 flush_work(&vblk->config_work);
1648 blk_mq_quiesce_queue(vblk->disk->queue);
1650 vdev->config->del_vqs(vdev);
1656 static int virtblk_restore(struct virtio_device *vdev)
1658 struct virtio_blk *vblk = vdev->priv;
1661 ret = init_vq(vdev->priv);
1665 virtio_device_ready(vdev);
1667 blk_mq_unquiesce_queue(vblk->disk->queue);
1672 static const struct virtio_device_id id_table[] = {
1673 { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
1677 static unsigned int features_legacy[] = {
1678 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
1679 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
1680 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
1681 VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
1682 VIRTIO_BLK_F_SECURE_ERASE,
1685 static unsigned int features[] = {
1686 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
1687 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
1688 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
1689 VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
1690 VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
1693 static struct virtio_driver virtio_blk = {
1694 .feature_table = features,
1695 .feature_table_size = ARRAY_SIZE(features),
1696 .feature_table_legacy = features_legacy,
1697 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
1698 .driver.name = KBUILD_MODNAME,
1699 .driver.owner = THIS_MODULE,
1700 .id_table = id_table,
1701 .probe = virtblk_probe,
1702 .remove = virtblk_remove,
1703 .config_changed = virtblk_config_changed,
1704 #ifdef CONFIG_PM_SLEEP
1705 .freeze = virtblk_freeze,
1706 .restore = virtblk_restore,
1710 static int __init virtio_blk_init(void)
1714 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
1718 major = register_blkdev(0, "virtblk");
1721 goto out_destroy_workqueue;
1724 error = register_virtio_driver(&virtio_blk);
1726 goto out_unregister_blkdev;
1729 out_unregister_blkdev:
1730 unregister_blkdev(major, "virtblk");
1731 out_destroy_workqueue:
1732 destroy_workqueue(virtblk_wq);
1736 static void __exit virtio_blk_fini(void)
1738 unregister_virtio_driver(&virtio_blk);
1739 unregister_blkdev(major, "virtblk");
1740 destroy_workqueue(virtblk_wq);
1742 module_init(virtio_blk_init);
1743 module_exit(virtio_blk_fini);
1745 MODULE_DEVICE_TABLE(virtio, id_table);
1746 MODULE_DESCRIPTION("Virtio block driver");
1747 MODULE_LICENSE("GPL");