// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT        0
#else
#define VIRTIO_BLK_INLINE_SG_CNT        2
#endif

static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
                 "Limit the number of request queues to use for blk device. "
                 "0 for no limit. "
                 "Values > nr_cpu_ids truncated to nr_cpu_ids.");

static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        /*
         * This mutex must be held by anything that may run after
         * virtblk_remove() sets vblk->vdev to NULL.
         *
         * blk-mq, virtqueue processing, and sysfs attribute code paths are
         * shut down before vblk->vdev is set to NULL and therefore do not need
         * to hold this mutex.
         */
        struct mutex vdev_mutex;
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        int io_queues[HCTX_MAX_TYPES];
        struct virtio_blk_vq *vqs;
};

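/*
 * Per-request driver state, carried in the blk-mq request PDU (sized via
 * tag_set.cmd_size in virtblk_probe()).  The trailing sg[] array provides
 * VIRTIO_BLK_INLINE_SG_CNT inline entries; larger requests chain extra
 * scatterlist pages through sg_alloc_table_chained().
 */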
struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct sg_table sg_table;
        struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];

        return vq;
}

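/*
 * Post a request to the virtqueue as up to three scatterlist groups: the
 * driver-written out_hdr, the optional data buffer (driver-to-device for
 * writes, device-to-driver for reads), and the device-written status byte.
 */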
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (vbr->sg_table.nents) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = vbr->sg_table.sgl;
                else
                        sgs[num_out + num_in++] = vbr->sg_table.sgl;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

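/*
 * Build the payload for a DISCARD or WRITE_ZEROES command: an array of
 * little-endian (sector, num_sectors, flags) ranges, one per bio, attached
 * to the request as a special payload.
 */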
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        /*
         * A single max discard segment means multi-range discard isn't
         * supported, and the block layer only runs contiguity merging as
         * for normal RW requests. So we can't rely on the bios for
         * retrieving each range's info.
         */
        if (queue_max_discard_segments(req->q) == 1) {
                range[0].flags = cpu_to_le32(flags);
                range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
                range[0].sector = cpu_to_le64(blk_rq_pos(req));
                n = 1;
        } else {
                __rq_for_each_bio(bio, req) {
                        u64 sector = bio->bi_iter.bi_sector;
                        u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                        range[n].flags = cpu_to_le32(flags);
                        range[n].num_sectors = cpu_to_le32(num_sectors);
                        range[n].sector = cpu_to_le64(sector);
                        n++;
                }
        }

        WARN_ON_ONCE(n != segments);

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}

static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
        if (blk_rq_nr_phys_segments(req))
                sg_free_table_chained(&vbr->sg_table,
                                      VIRTIO_BLK_INLINE_SG_CNT);
}

static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
                struct virtblk_req *vbr)
{
        int err;

        if (!blk_rq_nr_phys_segments(req))
                return 0;

        vbr->sg_table.sgl = vbr->sg;
        err = sg_alloc_table_chained(&vbr->sg_table,
                                     blk_rq_nr_phys_segments(req),
                                     vbr->sg_table.sgl,
                                     VIRTIO_BLK_INLINE_SG_CNT);
        if (unlikely(err))
                return -ENOMEM;

        return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}

static void virtblk_cleanup_cmd(struct request *req)
{
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
                kfree(bvec_virt(&req->special_vec));
}

static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
                                      struct request *req,
                                      struct virtblk_req *vbr)
{
        bool unmap = false;
        u32 type;

        vbr->out_hdr.sector = 0;

        switch (req_op(req)) {
        case REQ_OP_READ:
                type = VIRTIO_BLK_T_IN;
                vbr->out_hdr.sector = cpu_to_virtio64(vdev,
                                                      blk_rq_pos(req));
                break;
        case REQ_OP_WRITE:
                type = VIRTIO_BLK_T_OUT;
                vbr->out_hdr.sector = cpu_to_virtio64(vdev,
                                                      blk_rq_pos(req));
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
        vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                if (virtblk_setup_discard_write_zeroes(req, unmap))
                        return BLK_STS_RESOURCE;
        }

        return 0;
}

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        virtblk_unmap_data(req, vbr);
        virtblk_cleanup_cmd(req);
        blk_mq_end_request(req, virtblk_result(vbr));
}

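/*
 * Interrupt-driven completion path.  Callbacks are disabled and the used
 * ring drained in a loop until virtqueue_enable_cb() confirms that no
 * completion slipped in while callbacks were off.
 */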
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        if (likely(!blk_should_fake_timeout(req->q)))
                                blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
                                        struct virtio_blk *vblk,
                                        struct request *req,
                                        struct virtblk_req *vbr)
{
        blk_status_t status;
        int num;

        status = virtblk_setup_cmd(vblk->vdev, req, vbr);
        if (unlikely(status))
                return status;

        blk_mq_start_request(req);

        /*
         * virtblk_map_data() returns a negative errno on failure, but
         * sg_table.nents is unsigned, so check before assigning.
         */
        num = virtblk_map_data(hctx, req, vbr);
        if (unlikely(num < 0)) {
                virtblk_cleanup_cmd(req);
                return BLK_STS_RESOURCE;
        }
        vbr->sg_table.nents = num;

        return BLK_STS_OK;
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        int qid = hctx->queue_num;
        bool notify = false;
        blk_status_t status;
        int err;

        status = virtblk_prep_rq(hctx, vblk, req, vbr);
        if (unlikely(status))
                return status;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                /* Don't stop the queue if -ENOMEM: we may have failed to
                 * bounce the buffer due to global resource outage.
                 */
                if (err == -ENOSPC)
                        blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                virtblk_unmap_data(req, vbr);
                virtblk_cleanup_cmd(req);
                switch (err) {
                case -ENOSPC:
                        return BLK_STS_DEV_RESOURCE;
                case -ENOMEM:
                        return BLK_STS_RESOURCE;
                default:
                        return BLK_STS_IOERR;
                }
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}

static bool virtblk_prep_rq_batch(struct request *req)
{
        struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        req->mq_hctx->tags->rqs[req->tag] = req;

        return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}

static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
                                        struct request **rqlist,
                                        struct request **requeue_list)
{
        unsigned long flags;
        int err;
        bool kick;

        spin_lock_irqsave(&vq->lock, flags);

        while (!rq_list_empty(*rqlist)) {
                struct request *req = rq_list_pop(rqlist);
                struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

                err = virtblk_add_req(vq->vq, vbr);
                if (err) {
                        virtblk_unmap_data(req, vbr);
                        virtblk_cleanup_cmd(req);
                        rq_list_add(requeue_list, req);
                }
        }

        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irqrestore(&vq->lock, flags);

        return kick;
}

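/*
 * Submit a list of prepared requests in batches: requests are grouped by
 * hardware context so each batch takes the virtqueue lock and kicks the
 * device only once.  Requests that fail preparation or submission are
 * collected on requeue_list and handed back through *rqlist.
 */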
static void virtio_queue_rqs(struct request **rqlist)
{
        struct request *req, *next, *prev = NULL;
        struct request *requeue_list = NULL;

        rq_list_for_each_safe(rqlist, req, next) {
                struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
                bool kick;

                if (!virtblk_prep_rq_batch(req)) {
                        rq_list_move(rqlist, &requeue_list, req, prev);
                        req = prev;
                        if (!req)
                                continue;
                }

                if (!next || req->mq_hctx != next->mq_hctx) {
                        req->rq_next = NULL;
                        kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
                        if (kick)
                                virtqueue_notify(vq->vq);

                        *rqlist = next;
                        prev = NULL;
                } else
                        prev = req;
        }

        *rqlist = requeue_list;
}

/* Return the id (s/n) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_mq_free_request(req);
        return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (!vblk->vdev) {
                ret = -ENXIO;
                goto out;
        }

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

static void virtblk_free_disk(struct gendisk *disk)
{
        struct virtio_blk *vblk = disk->private_data;

        ida_simple_remove(&vd_index_ida, vblk->index);
        mutex_destroy(&vblk->vdev_mutex);
        kfree(vblk);
}

static const struct block_device_operations virtblk_fops = {
        .owner          = THIS_MODULE,
        .getgeo         = virtblk_getgeo,
        .free_disk      = virtblk_free_disk,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);

        virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

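/*
 * Discover the virtqueues: up to num_queues from config space when
 * VIRTIO_BLK_F_MQ is offered, capped by nr_cpu_ids and the
 * num_request_queues module parameter.  The last poll_queues of them are
 * created without a callback and are serviced by virtblk_poll() instead
 * of interrupts.
 */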
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        unsigned int num_poll_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        if (!err && !num_vqs) {
                dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
                return -EINVAL;
        }

        num_vqs = min_t(unsigned int,
                        min_not_zero(num_request_queues, nr_cpu_ids),
                        num_vqs);

        num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);

        vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
        vblk->io_queues[HCTX_TYPE_READ] = 0;
        vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

        dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
                                vblk->io_queues[HCTX_TYPE_DEFAULT],
                                vblk->io_queues[HCTX_TYPE_READ],
                                vblk->io_queues[HCTX_TYPE_POLL]);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs - num_poll_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        for (; i < num_vqs; i++) {
                callbacks[i] = NULL;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
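/* Examples: index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 701 -> "vdzz". */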
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

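/*
 * One queue map per hctx type: default, read (always empty here) and poll,
 * sized from vblk->io_queues[] as computed in init_vq().
 */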
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;
        int i, qoff;

        for (i = 0, qoff = 0; i < set->nr_maps; i++) {
                struct blk_mq_queue_map *map = &set->map[i];

                map->nr_queues = vblk->io_queues[i];
                map->queue_offset = qoff;
                qoff += map->nr_queues;

                if (map->nr_queues == 0)
                        continue;

                /*
                 * Regular queues have interrupts and hence CPU affinity is
                 * defined by the core virtio code, but polling queues have
                 * no interrupts so we let the block layer assign CPU affinity.
                 */
                if (i == HCTX_TYPE_POLL)
                        blk_mq_map_queues(&set->map[i]);
                else
                        blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
        }

        return 0;
}

static void virtblk_complete_batch(struct io_comp_batch *iob)
{
        struct request *req;

        rq_list_for_each(&iob->req_list, req) {
                virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
                virtblk_cleanup_cmd(req);
        }
        blk_mq_end_request_batch(iob);
}

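/*
 * Polled completion path for the poll virtqueues, which have no callback
 * registered.  Completions are batched via blk_mq_add_to_batch() and
 * finished together in virtblk_complete_batch() where possible.
 */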
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;
        int found = 0;

        spin_lock_irqsave(&vq->lock, flags);

        while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
                struct request *req = blk_mq_rq_from_pdu(vbr);

                found++;
                if (!blk_mq_add_to_batch(req, iob, vbr->status,
                                                virtblk_complete_batch))
                        blk_mq_complete_request(req);
        }

        if (found)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

        spin_unlock_irqrestore(&vq->lock, flags);

        return found;
}

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .queue_rqs      = virtio_queue_rqs,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .map_queues     = virtblk_map_queues,
        .poll           = virtblk_poll,
};

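/* queue_depth of 0 (the default) sizes the queue to fill the virtqueue ring. */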
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* Prevent integer overflows and honor max vq size */
        sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        mutex_init(&vblk->vdev_mutex);

        vblk->vdev = vdev;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        queue_depth /= 2;
        } else {
                queue_depth = virtblk_queue_depth;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;
        vblk->tag_set.nr_maps = 1;
        if (vblk->io_queues[HCTX_TYPE_POLL])
                vblk->tag_set.nr_maps = 3;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_free_vq;

        vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
        if (IS_ERR(vblk->disk)) {
                err = PTR_ERR(vblk->disk);
                goto out_free_tags;
        }
        q = vblk->disk->queue;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->minors = 1 << PART_BITS;
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, sg_elems);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err) {
                err = blk_validate_block_size(blk_size);
                if (err) {
                        dev_err(&vdev->dev,
                                "virtio_blk: invalid block size: 0x%x\n",
                                blk_size);
                        goto out_cleanup_disk;
                }

                blk_queue_logical_block_size(q, blk_size);
        } else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                if (v)
                        q->limits.discard_granularity = v << SECTOR_SHIFT;
                else
                        q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);

                /*
                 * max_discard_seg == 0 is out of spec but we always
                 * handled it.
                 */
                if (!v)
                        v = sg_elems;
                blk_queue_max_discard_segments(q,
                                               min(v, MAX_DISCARD_SEGMENTS));
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        if (err)
                goto out_cleanup_disk;

        return 0;

out_cleanup_disk:
        put_disk(vblk->disk);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_mq_free_tag_set(&vblk->tag_set);

        mutex_lock(&vblk->vdev_mutex);

        /* Stop all the virtqueues. */
        virtio_reset_device(vdev);

        /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
        vblk->vdev = NULL;

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        mutex_unlock(&vblk->vdev_mutex);

        put_disk(vblk->disk);
}

#ifdef CONFIG_PM_SLEEP
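/*
 * Suspend: reset the device so no further interrupts arrive, flush the
 * config work, quiesce blk-mq and delete the virtqueues.  Restore rebuilds
 * the virtqueues and unquiesces the queue.
 */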
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        virtio_reset_device(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init virtio_blk_init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit virtio_blk_fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(virtio_blk_init);
module_exit(virtio_blk_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");