// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* What the host tells us, plus 2 for the request header and status trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};

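/*
 * Translate the status byte written by the device into a blk_status_t
 * for the block layer: OK, operation not supported, or generic I/O error.
 */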
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

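/*
 * Add a request to a virtqueue as up to three scatterlist entries: the
 * out header (driver to device), an optional data buffer whose direction
 * follows the VIRTIO_BLK_T_OUT bit, and the one-byte status written back
 * by the device.
 */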
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

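/*
 * Build the payload of a discard/write-zeroes request: one range
 * descriptor per bio, attached to the request as a special payload so
 * that it is mapped and transferred like ordinary write data.
 */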
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        __rq_for_each_bio(bio, req) {
                u64 sector = bio->bi_iter.bi_sector;
                u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                range[n].flags = cpu_to_le32(flags);
                range[n].num_sectors = cpu_to_le32(num_sectors);
                range[n].sector = cpu_to_le64(sector);
                n++;
        }

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}

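/*
 * Virtqueue callback: reap completed requests until the ring is empty,
 * re-enabling callbacks last to avoid missing a late completion, then
 * restart any hardware queues that were stopped while the ring was full.
 */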
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

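/*
 * ->commit_rqs runs when a dispatch batch ended without queue_rq() seeing
 * a request marked bd->last, so the kick it deferred must happen here.
 */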
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

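/*
 * ->queue_rq: translate one block layer request into a virtio-blk command,
 * map its data, add it to the hardware context's virtqueue, and notify the
 * device once the batch is complete or the ring is full.
 */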
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        bool unmap = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                err = virtblk_setup_discard_write_zeroes(req, unmap);
                if (err)
                        return BLK_STS_RESOURCE;
        }

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                /* Don't stop the queue if -ENOMEM: we may have failed to
                 * bounce the buffer due to global resource outage.
                 */
                if (err == -ENOSPC)
                        blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_STS_DEV_RESOURCE;
                return BLK_STS_IOERR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}

/* Read the device ID (serial number) string for *disk into *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_put_request(req);
        return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity(vblk->disk, capacity);
}

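/*
 * Runs from virtblk_wq in process context, as config space accesses may
 * sleep: re-read the capacity and send a RESIZE uevent to userspace.
 */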
static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        char *envp[] = { "RESIZE=1", NULL };

        virtblk_update_capacity(vblk, true);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

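/*
 * Allocate and discover the virtqueues: use the queue count the device
 * advertises via VIRTIO_BLK_F_MQ (falling back to one queue), capped at
 * nr_cpu_ids, and give each queue its own name, callback and lock.
 */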
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

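/*
 * Sysfs interface for the cache mode. For a disk named vda, for example,
 * the mode can be read, and changed if VIRTIO_BLK_F_CONFIG_WCE is offered:
 *
 *     cat /sys/block/vda/cache_type
 *     echo "write back" > /sys/block/vda/cache_type
 */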
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

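/*
 * Called once per request when the tag set is allocated: initialize the
 * scatterlist embedded in the request's driver payload.
 */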
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
        .map_queues     = virtblk_map_queues,
};

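/*
 * Fixed queue depth, read-only at runtime (0444). The default of 0 means
 * the depth is taken from the virtqueue ring size in virtblk_probe().
 */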
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

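/*
 * Probe: allocate per-device state, set up the virtqueues and blk-mq tag
 * set, create the gendisk, and apply whatever limits and topology the host
 * advertises (segments, block size, discard/write-zeroes) before adding
 * the disk.
 */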
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need two extra sg elements, for the header and the status. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;

out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

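/*
 * Teardown mirrors probe: flush pending config work, remove the disk and
 * queue, reset the device to stop the virtqueues, then free everything.
 * The IDA index is released only when no other users hold the disk.
 */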
static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
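/*
 * Freeze resets the device so no further interrupts arrive, quiesces the
 * queue and deletes the virtqueues; restore recreates the virtqueues,
 * marks the device ready and unquiesces the queue.
 */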
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

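/*
 * Module init: create the config-update workqueue, register a dynamically
 * allocated block major as "virtblk", then register the virtio driver;
 * unwind in reverse order on failure and in fini().
 */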
static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");