1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Virtio ring implementation.
4 * Copyright 2007 Rusty Russell IBM Corporation */
6 #include <linux/virtio.h>
7 #include <linux/virtio_ring.h>
8 #include <linux/virtio_config.h>
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/hrtimer.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/spinlock.h>
17 static bool force_used_validation = false;
18 module_param(force_used_validation, bool, 0444);
21 /* For development, we want to crash whenever the ring is screwed. */
22 #define BAD_RING(_vq, fmt, args...) \
24 dev_err(&(_vq)->vq.vdev->dev, \
25 "%s:"fmt, (_vq)->vq.name, ##args); \
28 /* Caller is supposed to guarantee no reentry. */
29 #define START_USE(_vq) \
32 panic("%s:in_use = %i\n", \
33 (_vq)->vq.name, (_vq)->in_use); \
34 (_vq)->in_use = __LINE__; \
36 #define END_USE(_vq) \
37 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
38 #define LAST_ADD_TIME_UPDATE(_vq) \
40 ktime_t now = ktime_get(); \
42 /* No kick or get, with 0.1 second between? Warn. */ \
43 if ((_vq)->last_add_time_valid) \
44 WARN_ON(ktime_to_ms(ktime_sub(now, \
45 (_vq)->last_add_time)) > 100); \
46 (_vq)->last_add_time = now; \
47 (_vq)->last_add_time_valid = true; \
49 #define LAST_ADD_TIME_CHECK(_vq) \
51 if ((_vq)->last_add_time_valid) { \
52 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
53 (_vq)->last_add_time)) > 100); \
56 #define LAST_ADD_TIME_INVALID(_vq) \
57 ((_vq)->last_add_time_valid = false)
59 #define BAD_RING(_vq, fmt, args...) \
61 dev_err(&_vq->vq.vdev->dev, \
62 "%s:"fmt, (_vq)->vq.name, ##args); \
63 (_vq)->broken = true; \
67 #define LAST_ADD_TIME_UPDATE(vq)
68 #define LAST_ADD_TIME_CHECK(vq)
69 #define LAST_ADD_TIME_INVALID(vq)
72 struct vring_desc_state_split {
73 void *data; /* Data for callback. */
74 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
77 struct vring_desc_state_packed {
78 void *data; /* Data for callback. */
79 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
80 u16 num; /* Descriptor list length. */
81 u16 last; /* The last desc state in a list. */
84 struct vring_desc_extra {
85 dma_addr_t addr; /* Descriptor DMA addr. */
86 u32 len; /* Descriptor length. */
87 u16 flags; /* Descriptor flags. */
88 u16 next; /* The next desc state in a list. */
91 struct vring_virtqueue {
94 /* Is this a packed ring? */
97 /* Is DMA API used? */
100 /* Can we use weak barriers? */
103 /* Other side has made a mess, don't try any more. */
106 /* Host supports indirect buffers */
109 /* Host publishes avail event idx */
112 /* Head of free buffer list. */
113 unsigned int free_head;
114 /* Number we've added since last sync. */
115 unsigned int num_added;
117 /* Last used index we've seen. */
120 /* Hint for event idx: already triggered no need to disable. */
121 bool event_triggered;
124 /* Available for split ring */
126 /* Actual memory layout for this queue. */
129 /* Last written value to avail->flags */
130 u16 avail_flags_shadow;
133 * Last written value to avail->idx in
136 u16 avail_idx_shadow;
138 /* Per-descriptor state. */
139 struct vring_desc_state_split *desc_state;
140 struct vring_desc_extra *desc_extra;
142 /* DMA address and size information */
143 dma_addr_t queue_dma_addr;
144 size_t queue_size_in_bytes;
147 /* Available for packed ring */
149 /* Actual memory layout for this queue. */
152 struct vring_packed_desc *desc;
153 struct vring_packed_desc_event *driver;
154 struct vring_packed_desc_event *device;
157 /* Driver ring wrap counter. */
158 bool avail_wrap_counter;
160 /* Device ring wrap counter. */
161 bool used_wrap_counter;
163 /* Avail used flags. */
164 u16 avail_used_flags;
166 /* Index of the next avail descriptor. */
170 * Last written value to driver->flags in
173 u16 event_flags_shadow;
175 /* Per-descriptor state. */
176 struct vring_desc_state_packed *desc_state;
177 struct vring_desc_extra *desc_extra;
179 /* DMA address and size information */
180 dma_addr_t ring_dma_addr;
181 dma_addr_t driver_event_dma_addr;
182 dma_addr_t device_event_dma_addr;
183 size_t ring_size_in_bytes;
184 size_t event_size_in_bytes;
188 /* Per-descriptor in-buffer length */
191 /* How to notify other side. FIXME: commonalize hcalls! */
192 bool (*notify)(struct virtqueue *vq);
194 /* DMA, allocation, and size information */
198 /* They're supposed to lock for us. */
201 /* Figure out if their kicks are too delayed. */
202 bool last_add_time_valid;
203 ktime_t last_add_time;
212 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
214 static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
215 unsigned int total_sg)
217 struct vring_virtqueue *vq = to_vvq(_vq);
220 * If the host supports indirect descriptor tables, and we have multiple
221 * buffers, then go indirect. FIXME: tune this threshold
223 return (vq->indirect && total_sg > 1 && vq->vq.num_free);
227 * Modern virtio devices have feature bits to specify whether they need a
228 * quirk and bypass the IOMMU. If not there, just use the DMA API.
230 * If there, the interaction between virtio and DMA API is messy.
232 * On most systems with virtio, physical addresses match bus addresses,
233 * and it doesn't particularly matter whether we use the DMA API.
235 * On some systems, including Xen and any system with a physical device
236 * that speaks virtio behind a physical IOMMU, we must use the DMA API
237 * for virtio DMA to work at all.
239 * On other systems, including SPARC and PPC64, virtio-pci devices are
240 * enumerated as though they are behind an IOMMU, but the virtio host
241 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
242 * there or somehow map everything as the identity.
244 * For the time being, we preserve historic behavior and bypass the DMA API.
247 * TODO: install a per-device DMA ops structure that does the right thing
248 * taking into account all the above quirks, and use the DMA API
249 * unconditionally on data path.
252 static bool vring_use_dma_api(struct virtio_device *vdev)
254 if (!virtio_has_dma_quirk(vdev))
257 /* Otherwise, we are left to guess. */
259 * In theory, it's possible to have a buggy QEMU-supplied
260 * emulated Q35 IOMMU and Xen enabled at the same time. On
261 * such a configuration, virtio has never worked and will
262 * not work without an even larger kludge. Instead, enable
263 * the DMA API if we're a Xen guest, which at least allows
264 * all of the sensible Xen configurations to work correctly.
272 size_t virtio_max_dma_size(struct virtio_device *vdev)
274 size_t max_segment_size = SIZE_MAX;
276 if (vring_use_dma_api(vdev))
277 max_segment_size = dma_max_mapping_size(&vdev->dev);
279 return max_segment_size;
281 EXPORT_SYMBOL_GPL(virtio_max_dma_size);
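
/*
 * Illustrative sketch, not part of this file: a driver's probe path might
 * clamp its own per-segment limit with virtio_max_dma_size().  The 1 MiB
 * cap below is a hypothetical driver policy, not something virtio requires.
 */
static void __maybe_unused example_clamp_seg_size(struct virtio_device *vdev)
{
	size_t seg_max = min_t(size_t, virtio_max_dma_size(vdev), 1024 * 1024);

	dev_info(&vdev->dev, "limiting segments to %zu bytes\n", seg_max);
}
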
283 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
284 dma_addr_t *dma_handle, gfp_t flag)
286 if (vring_use_dma_api(vdev)) {
287 return dma_alloc_coherent(vdev->dev.parent, size,
290 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
293 phys_addr_t phys_addr = virt_to_phys(queue);
294 *dma_handle = (dma_addr_t)phys_addr;
297 * Sanity check: make sure we didn't truncate
298 * the address. The only arches I can find that
299 * have 64-bit phys_addr_t but 32-bit dma_addr_t
300 * are certain non-highmem MIPS and x86
301 * configurations, but these configurations
302 * should never allocate physical pages above 32
303 * bits, so this is fine. Just in case, throw a
304 * warning and abort if we end up with an
305 * unrepresentable address.
307 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
308 free_pages_exact(queue, PAGE_ALIGN(size));
316 static void vring_free_queue(struct virtio_device *vdev, size_t size,
317 void *queue, dma_addr_t dma_handle)
319 if (vring_use_dma_api(vdev))
320 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
322 free_pages_exact(queue, PAGE_ALIGN(size));
326 * The DMA ops on various arches are rather gnarly right now, and
327 * making all of the arch DMA ops work on the vring device itself
328 * is a mess. For now, we use the parent device for DMA ops.
330 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
332 return vq->vq.vdev->dev.parent;
335 /* Map one sg entry. */
336 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
337 struct scatterlist *sg,
338 enum dma_data_direction direction)
340 if (!vq->use_dma_api)
341 return (dma_addr_t)sg_phys(sg);
344 * We can't use dma_map_sg, because we don't use scatterlists in
345 * the way it expects (we don't guarantee that the scatterlist
346 * will exist for the lifetime of the mapping).
348 return dma_map_page(vring_dma_dev(vq),
349 sg_page(sg), sg->offset, sg->length,
353 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
354 void *cpu_addr, size_t size,
355 enum dma_data_direction direction)
357 if (!vq->use_dma_api)
358 return (dma_addr_t)virt_to_phys(cpu_addr);
360 return dma_map_single(vring_dma_dev(vq),
361 cpu_addr, size, direction);
364 static int vring_mapping_error(const struct vring_virtqueue *vq,
367 if (!vq->use_dma_api)
370 return dma_mapping_error(vring_dma_dev(vq), addr);
375 * Split ring specific functions - *_split().
378 static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
379 struct vring_desc *desc)
383 if (!vq->use_dma_api)
386 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
388 if (flags & VRING_DESC_F_INDIRECT) {
389 dma_unmap_single(vring_dma_dev(vq),
390 virtio64_to_cpu(vq->vq.vdev, desc->addr),
391 virtio32_to_cpu(vq->vq.vdev, desc->len),
392 (flags & VRING_DESC_F_WRITE) ?
393 DMA_FROM_DEVICE : DMA_TO_DEVICE);
395 dma_unmap_page(vring_dma_dev(vq),
396 virtio64_to_cpu(vq->vq.vdev, desc->addr),
397 virtio32_to_cpu(vq->vq.vdev, desc->len),
398 (flags & VRING_DESC_F_WRITE) ?
399 DMA_FROM_DEVICE : DMA_TO_DEVICE);
403 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
406 struct vring_desc_extra *extra = vq->split.desc_extra;
409 if (!vq->use_dma_api)
412 flags = extra[i].flags;
414 if (flags & VRING_DESC_F_INDIRECT) {
415 dma_unmap_single(vring_dma_dev(vq),
418 (flags & VRING_DESC_F_WRITE) ?
419 DMA_FROM_DEVICE : DMA_TO_DEVICE);
421 dma_unmap_page(vring_dma_dev(vq),
424 (flags & VRING_DESC_F_WRITE) ?
425 DMA_FROM_DEVICE : DMA_TO_DEVICE);
429 return extra[i].next;
432 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
433 unsigned int total_sg,
436 struct vring_desc *desc;
440 * We require lowmem mappings for the descriptors because
441 * otherwise virt_to_phys will give us bogus addresses in the virtqueue.
444 gfp &= ~__GFP_HIGHMEM;
446 desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
450 for (i = 0; i < total_sg; i++)
451 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
455 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
456 struct vring_desc *desc,
463 struct vring_virtqueue *vring = to_vvq(vq);
464 struct vring_desc_extra *extra = vring->split.desc_extra;
467 desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
468 desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
469 desc[i].len = cpu_to_virtio32(vq->vdev, len);
472 next = extra[i].next;
473 desc[i].next = cpu_to_virtio16(vq->vdev, next);
475 extra[i].addr = addr;
477 extra[i].flags = flags;
479 next = virtio16_to_cpu(vq->vdev, desc[i].next);
484 static inline int virtqueue_add_split(struct virtqueue *_vq,
485 struct scatterlist *sgs[],
486 unsigned int total_sg,
487 unsigned int out_sgs,
493 struct vring_virtqueue *vq = to_vvq(_vq);
494 struct scatterlist *sg;
495 struct vring_desc *desc;
496 unsigned int i, n, avail, descs_used, prev, err_idx;
503 BUG_ON(data == NULL);
504 BUG_ON(ctx && vq->indirect);
506 if (unlikely(vq->broken)) {
511 LAST_ADD_TIME_UPDATE(vq);
513 BUG_ON(total_sg == 0);
515 head = vq->free_head;
517 if (virtqueue_use_indirect(_vq, total_sg))
518 desc = alloc_indirect_split(_vq, total_sg, gfp);
521 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
525 /* Use a single buffer which doesn't continue */
527 /* Set up rest to use this indirect table. */
532 desc = vq->split.vring.desc;
534 descs_used = total_sg;
537 if (vq->vq.num_free < descs_used) {
538 pr_debug("Can't add buf len %i - avail = %i\n",
539 descs_used, vq->vq.num_free);
540 /* FIXME: for historical reasons, we force a notify here if
541 * there are outgoing parts to the buffer. Presumably the
542 * host should service the ring ASAP. */
551 for (n = 0; n < out_sgs; n++) {
552 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
553 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
554 if (vring_mapping_error(vq, addr))
558 /* Note that we trust indirect descriptor
559 * table since it uses streaming DMA mapping. */
561 i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
566 for (; n < (out_sgs + in_sgs); n++) {
567 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
568 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
569 if (vring_mapping_error(vq, addr))
573 /* Note that we trust indirect descriptor
574 * table since it uses streaming DMA mapping. */
576 i = virtqueue_add_desc_split(_vq, desc, i, addr,
581 buflen += sg->length;
584 /* Last one doesn't continue. */
585 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
586 if (!indirect && vq->use_dma_api)
587 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
591 /* Now that the indirect table is filled in, map it. */
592 dma_addr_t addr = vring_map_single(
593 vq, desc, total_sg * sizeof(struct vring_desc),
595 if (vring_mapping_error(vq, addr))
598 virtqueue_add_desc_split(_vq, vq->split.vring.desc,
600 total_sg * sizeof(struct vring_desc),
601 VRING_DESC_F_INDIRECT,
605 /* We're using some buffers from the free list. */
606 vq->vq.num_free -= descs_used;
608 /* Update free pointer */
610 vq->free_head = vq->split.desc_extra[head].next;
614 /* Store token and indirect buffer state. */
615 vq->split.desc_state[head].data = data;
617 vq->split.desc_state[head].indir_desc = desc;
619 vq->split.desc_state[head].indir_desc = ctx;
621 /* Store in buffer length if necessary */
623 vq->buflen[head] = buflen;
625 /* Put entry in available array (but don't update avail->idx until they do sync). */
627 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
628 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
630 /* Descriptors and available array need to be set before we expose the
631 * new available array entries. */
632 virtio_wmb(vq->weak_barriers);
633 vq->split.avail_idx_shadow++;
634 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
635 vq->split.avail_idx_shadow);
638 pr_debug("Added buffer head %i to %p\n", head, vq);
641 /* This is very unlikely, but theoretically possible. Kick just in case. */
643 if (unlikely(vq->num_added == (1 << 16) - 1))
656 for (n = 0; n < total_sg; n++) {
660 vring_unmap_one_split_indirect(vq, &desc[i]);
661 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
663 i = vring_unmap_one_split(vq, i);
673 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
675 struct vring_virtqueue *vq = to_vvq(_vq);
680 /* We need to expose available array entries before checking avail event. */
682 virtio_mb(vq->weak_barriers);
684 old = vq->split.avail_idx_shadow - vq->num_added;
685 new = vq->split.avail_idx_shadow;
688 LAST_ADD_TIME_CHECK(vq);
689 LAST_ADD_TIME_INVALID(vq);
692 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
693 vring_avail_event(&vq->split.vring)),
696 needs_kick = !(vq->split.vring.used->flags &
697 cpu_to_virtio16(_vq->vdev,
698 VRING_USED_F_NO_NOTIFY));
704 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
708 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
710 /* Clear data ptr. */
711 vq->split.desc_state[head].data = NULL;
713 /* Put back on free list: unmap first-level descriptors and find end */
716 while (vq->split.vring.desc[i].flags & nextflag) {
717 vring_unmap_one_split(vq, i);
718 i = vq->split.desc_extra[i].next;
722 vring_unmap_one_split(vq, i);
723 vq->split.desc_extra[i].next = vq->free_head;
724 vq->free_head = head;
726 /* Plus final descriptor */
730 struct vring_desc *indir_desc =
731 vq->split.desc_state[head].indir_desc;
734 /* Free the indirect table, if any, now that it's unmapped. */
738 len = vq->split.desc_extra[head].len;
740 BUG_ON(!(vq->split.desc_extra[head].flags &
741 VRING_DESC_F_INDIRECT));
742 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
744 for (j = 0; j < len / sizeof(struct vring_desc); j++)
745 vring_unmap_one_split_indirect(vq, &indir_desc[j]);
748 vq->split.desc_state[head].indir_desc = NULL;
750 *ctx = vq->split.desc_state[head].indir_desc;
754 static inline bool more_used_split(const struct vring_virtqueue *vq)
756 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
757 vq->split.vring.used->idx);
760 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
764 struct vring_virtqueue *vq = to_vvq(_vq);
771 if (unlikely(vq->broken)) {
776 if (!more_used_split(vq)) {
777 pr_debug("No more buffers in queue\n");
782 /* Only get used array entries after they have been exposed by host. */
783 virtio_rmb(vq->weak_barriers);
785 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
786 i = virtio32_to_cpu(_vq->vdev,
787 vq->split.vring.used->ring[last_used].id);
788 *len = virtio32_to_cpu(_vq->vdev,
789 vq->split.vring.used->ring[last_used].len);
791 if (unlikely(i >= vq->split.vring.num)) {
792 BAD_RING(vq, "id %u out of range\n", i);
795 if (unlikely(!vq->split.desc_state[i].data)) {
796 BAD_RING(vq, "id %u is not a head!\n", i);
799 if (vq->buflen && unlikely(*len > vq->buflen[i])) {
800 BAD_RING(vq, "used len %d is larger than in buflen %u\n",
801 *len, vq->buflen[i]);
805 /* detach_buf_split clears data, so grab it now. */
806 ret = vq->split.desc_state[i].data;
807 detach_buf_split(vq, i, ctx);
809 /* If we expect an interrupt for the next entry, tell host
810 * by writing event index and flush out the write before
811 * the read in the next get_buf call. */
812 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
813 virtio_store_mb(vq->weak_barriers,
814 &vring_used_event(&vq->split.vring),
815 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
817 LAST_ADD_TIME_INVALID(vq);
823 static void virtqueue_disable_cb_split(struct virtqueue *_vq)
825 struct vring_virtqueue *vq = to_vvq(_vq);
827 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
828 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
830 /* TODO: this is a hack. Figure out a cleaner value to write. */
831 vring_used_event(&vq->split.vring) = 0x0;
833 vq->split.vring.avail->flags =
834 cpu_to_virtio16(_vq->vdev,
835 vq->split.avail_flags_shadow);
839 static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
841 struct vring_virtqueue *vq = to_vvq(_vq);
846 /* We optimistically turn back on interrupts, then check if there was more to do. */
848 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
849 * either clear the flags bit or point the event index at the next
850 * entry. Always do both to keep code simple. */
851 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
852 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
854 vq->split.vring.avail->flags =
855 cpu_to_virtio16(_vq->vdev,
856 vq->split.avail_flags_shadow);
858 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
859 last_used_idx = vq->last_used_idx);
861 return last_used_idx;
864 static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
866 struct vring_virtqueue *vq = to_vvq(_vq);
868 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
869 vq->split.vring.used->idx);
872 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
874 struct vring_virtqueue *vq = to_vvq(_vq);
879 /* We optimistically turn back on interrupts, then check if there was more to do. */
881 * Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
882 * either clear the flags bit or point the event index at the next
883 * entry. Always update the event index to keep code simple. */
884 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
885 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
887 vq->split.vring.avail->flags =
888 cpu_to_virtio16(_vq->vdev,
889 vq->split.avail_flags_shadow);
891 /* TODO: tune this threshold */
892 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
894 virtio_store_mb(vq->weak_barriers,
895 &vring_used_event(&vq->split.vring),
896 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
898 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
899 - vq->last_used_idx) > bufs)) {
908 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
910 struct vring_virtqueue *vq = to_vvq(_vq);
916 for (i = 0; i < vq->split.vring.num; i++) {
917 if (!vq->split.desc_state[i].data)
919 /* detach_buf_split clears data, so grab it now. */
920 buf = vq->split.desc_state[i].data;
921 detach_buf_split(vq, i, NULL);
922 vq->split.avail_idx_shadow--;
923 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
924 vq->split.avail_idx_shadow);
928 /* That should have freed everything. */
929 BUG_ON(vq->vq.num_free != vq->split.vring.num);
935 static struct virtqueue *vring_create_virtqueue_split(
938 unsigned int vring_align,
939 struct virtio_device *vdev,
943 bool (*notify)(struct virtqueue *),
944 void (*callback)(struct virtqueue *),
947 struct virtqueue *vq;
950 size_t queue_size_in_bytes;
953 /* We assume num is a power of 2. */
954 if (num & (num - 1)) {
955 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
959 /* TODO: allocate each queue chunk individually */
960 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
961 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
963 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
974 /* Try to get a single page. You are my only hope! */
975 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
976 &dma_addr, GFP_KERNEL|__GFP_ZERO);
981 queue_size_in_bytes = vring_size(num, vring_align);
982 vring_init(&vring, num, queue, vring_align);
984 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
985 notify, callback, name);
987 vring_free_queue(vdev, queue_size_in_bytes, queue,
992 to_vvq(vq)->split.queue_dma_addr = dma_addr;
993 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
994 to_vvq(vq)->we_own_ring = true;
1001 * Packed ring specific functions - *_packed().
1004 static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
1005 struct vring_desc_extra *state)
1009 if (!vq->use_dma_api)
1012 flags = state->flags;
1014 if (flags & VRING_DESC_F_INDIRECT) {
1015 dma_unmap_single(vring_dma_dev(vq),
1016 state->addr, state->len,
1017 (flags & VRING_DESC_F_WRITE) ?
1018 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1020 dma_unmap_page(vring_dma_dev(vq),
1021 state->addr, state->len,
1022 (flags & VRING_DESC_F_WRITE) ?
1023 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1027 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
1028 struct vring_packed_desc *desc)
1032 if (!vq->use_dma_api)
1035 flags = le16_to_cpu(desc->flags);
1037 if (flags & VRING_DESC_F_INDIRECT) {
1038 dma_unmap_single(vring_dma_dev(vq),
1039 le64_to_cpu(desc->addr),
1040 le32_to_cpu(desc->len),
1041 (flags & VRING_DESC_F_WRITE) ?
1042 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1044 dma_unmap_page(vring_dma_dev(vq),
1045 le64_to_cpu(desc->addr),
1046 le32_to_cpu(desc->len),
1047 (flags & VRING_DESC_F_WRITE) ?
1048 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1052 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
1055 struct vring_packed_desc *desc;
1058 * We require lowmem mappings for the descriptors because
1059 * otherwise virt_to_phys will give us bogus addresses in the virtqueue.
1062 gfp &= ~__GFP_HIGHMEM;
1064 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
1069 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
1070 struct scatterlist *sgs[],
1071 unsigned int total_sg,
1072 unsigned int out_sgs,
1073 unsigned int in_sgs,
1077 struct vring_packed_desc *desc;
1078 struct scatterlist *sg;
1079 unsigned int i, n, err_idx;
1084 head = vq->packed.next_avail_idx;
1085 desc = alloc_indirect_packed(total_sg, gfp);
1089 if (unlikely(vq->vq.num_free < 1)) {
1090 pr_debug("Can't add buf len 1 - avail = 0\n");
1098 BUG_ON(id == vq->packed.vring.num);
1100 for (n = 0; n < out_sgs + in_sgs; n++) {
1101 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1102 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1103 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1104 if (vring_mapping_error(vq, addr))
1107 desc[i].flags = cpu_to_le16(n < out_sgs ?
1108 0 : VRING_DESC_F_WRITE);
1109 desc[i].addr = cpu_to_le64(addr);
1110 desc[i].len = cpu_to_le32(sg->length);
1113 buflen += sg->length;
1117 /* Now that the indirect table is filled in, map it. */
1118 addr = vring_map_single(vq, desc,
1119 total_sg * sizeof(struct vring_packed_desc),
1121 if (vring_mapping_error(vq, addr))
1124 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1125 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1126 sizeof(struct vring_packed_desc));
1127 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1129 if (vq->use_dma_api) {
1130 vq->packed.desc_extra[id].addr = addr;
1131 vq->packed.desc_extra[id].len = total_sg *
1132 sizeof(struct vring_packed_desc);
1133 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1134 vq->packed.avail_used_flags;
1138 * A driver MUST NOT make the first descriptor in the list
1139 * available before all subsequent descriptors comprising
1140 * the list are made available.
1142 virtio_wmb(vq->weak_barriers);
1143 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1144 vq->packed.avail_used_flags);
1146 /* We're using some buffers from the free list. */
1147 vq->vq.num_free -= 1;
1149 /* Update free pointer */
1151 if (n >= vq->packed.vring.num) {
1153 vq->packed.avail_wrap_counter ^= 1;
1154 vq->packed.avail_used_flags ^=
1155 1 << VRING_PACKED_DESC_F_AVAIL |
1156 1 << VRING_PACKED_DESC_F_USED;
1158 vq->packed.next_avail_idx = n;
1159 vq->free_head = vq->packed.desc_extra[id].next;
1161 /* Store token and indirect buffer state. */
1162 vq->packed.desc_state[id].num = 1;
1163 vq->packed.desc_state[id].data = data;
1164 vq->packed.desc_state[id].indir_desc = desc;
1165 vq->packed.desc_state[id].last = id;
1167 /* Store in buffer length if necessary */
1169 vq->buflen[id] = buflen;
1173 pr_debug("Added buffer head %i to %p\n", head, vq);
1181 for (i = 0; i < err_idx; i++)
1182 vring_unmap_desc_packed(vq, &desc[i]);
1190 static inline int virtqueue_add_packed(struct virtqueue *_vq,
1191 struct scatterlist *sgs[],
1192 unsigned int total_sg,
1193 unsigned int out_sgs,
1194 unsigned int in_sgs,
1199 struct vring_virtqueue *vq = to_vvq(_vq);
1200 struct vring_packed_desc *desc;
1201 struct scatterlist *sg;
1202 unsigned int i, n, c, descs_used, err_idx;
1203 __le16 head_flags, flags;
1204 u16 head, id, prev, curr, avail_used_flags;
1210 BUG_ON(data == NULL);
1211 BUG_ON(ctx && vq->indirect);
1213 if (unlikely(vq->broken)) {
1218 LAST_ADD_TIME_UPDATE(vq);
1220 BUG_ON(total_sg == 0);
1222 if (virtqueue_use_indirect(_vq, total_sg)) {
1223 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1228 /* fall back on direct */
1231 head = vq->packed.next_avail_idx;
1232 avail_used_flags = vq->packed.avail_used_flags;
1234 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1236 desc = vq->packed.vring.desc;
1238 descs_used = total_sg;
1240 if (unlikely(vq->vq.num_free < descs_used)) {
1241 pr_debug("Can't add buf len %i - avail = %i\n",
1242 descs_used, vq->vq.num_free);
1248 BUG_ON(id == vq->packed.vring.num);
1252 for (n = 0; n < out_sgs + in_sgs; n++) {
1253 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1254 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1255 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1256 if (vring_mapping_error(vq, addr))
1259 flags = cpu_to_le16(vq->packed.avail_used_flags |
1260 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1261 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1265 desc[i].flags = flags;
1267 desc[i].addr = cpu_to_le64(addr);
1268 desc[i].len = cpu_to_le32(sg->length);
1269 desc[i].id = cpu_to_le16(id);
1271 if (unlikely(vq->use_dma_api)) {
1272 vq->packed.desc_extra[curr].addr = addr;
1273 vq->packed.desc_extra[curr].len = sg->length;
1274 vq->packed.desc_extra[curr].flags =
1278 curr = vq->packed.desc_extra[curr].next;
1280 if ((unlikely(++i >= vq->packed.vring.num))) {
1282 vq->packed.avail_used_flags ^=
1283 1 << VRING_PACKED_DESC_F_AVAIL |
1284 1 << VRING_PACKED_DESC_F_USED;
1287 buflen += sg->length;
1292 vq->packed.avail_wrap_counter ^= 1;
1294 /* We're using some buffers from the free list. */
1295 vq->vq.num_free -= descs_used;
1297 /* Update free pointer */
1298 vq->packed.next_avail_idx = i;
1299 vq->free_head = curr;
1302 vq->packed.desc_state[id].num = descs_used;
1303 vq->packed.desc_state[id].data = data;
1304 vq->packed.desc_state[id].indir_desc = ctx;
1305 vq->packed.desc_state[id].last = prev;
1307 /* Store in buffer length if necessary */
1309 vq->buflen[id] = buflen;
1312 * A driver MUST NOT make the first descriptor in the list
1313 * available before all subsequent descriptors comprising
1314 * the list are made available.
1316 virtio_wmb(vq->weak_barriers);
1317 vq->packed.vring.desc[head].flags = head_flags;
1318 vq->num_added += descs_used;
1320 pr_debug("Added buffer head %i to %p\n", head, vq);
1328 curr = vq->free_head;
1330 vq->packed.avail_used_flags = avail_used_flags;
1332 for (n = 0; n < total_sg; n++) {
1335 vring_unmap_state_packed(vq,
1336 &vq->packed.desc_extra[curr]);
1337 curr = vq->packed.desc_extra[curr].next;
1339 if (i >= vq->packed.vring.num)
1347 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1349 struct vring_virtqueue *vq = to_vvq(_vq);
1350 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
1363 * We need to expose the new flags value before checking notification suppressions.
1366 virtio_mb(vq->weak_barriers);
1368 old = vq->packed.next_avail_idx - vq->num_added;
1369 new = vq->packed.next_avail_idx;
1372 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1373 flags = le16_to_cpu(snapshot.flags);
1375 LAST_ADD_TIME_CHECK(vq);
1376 LAST_ADD_TIME_INVALID(vq);
1378 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1379 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1383 off_wrap = le16_to_cpu(snapshot.off_wrap);
1385 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1386 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1387 if (wrap_counter != vq->packed.avail_wrap_counter)
1388 event_idx -= vq->packed.vring.num;
1390 needs_kick = vring_need_event(event_idx, new, old);
1396 static void detach_buf_packed(struct vring_virtqueue *vq,
1397 unsigned int id, void **ctx)
1399 struct vring_desc_state_packed *state = NULL;
1400 struct vring_packed_desc *desc;
1401 unsigned int i, curr;
1403 state = &vq->packed.desc_state[id];
1405 /* Clear data ptr. */
1408 vq->packed.desc_extra[state->last].next = vq->free_head;
1410 vq->vq.num_free += state->num;
1412 if (unlikely(vq->use_dma_api)) {
1414 for (i = 0; i < state->num; i++) {
1415 vring_unmap_state_packed(vq,
1416 &vq->packed.desc_extra[curr]);
1417 curr = vq->packed.desc_extra[curr].next;
1424 /* Free the indirect table, if any, now that it's unmapped. */
1425 desc = state->indir_desc;
1429 if (vq->use_dma_api) {
1430 len = vq->packed.desc_extra[id].len;
1431 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1433 vring_unmap_desc_packed(vq, &desc[i]);
1436 state->indir_desc = NULL;
1438 *ctx = state->indir_desc;
1442 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1443 u16 idx, bool used_wrap_counter)
1448 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1449 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1450 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1452 return avail == used && used == used_wrap_counter;
1455 static inline bool more_used_packed(const struct vring_virtqueue *vq)
1457 return is_used_desc_packed(vq, vq->last_used_idx,
1458 vq->packed.used_wrap_counter);
1461 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1465 struct vring_virtqueue *vq = to_vvq(_vq);
1471 if (unlikely(vq->broken)) {
1476 if (!more_used_packed(vq)) {
1477 pr_debug("No more buffers in queue\n");
1482 /* Only get used elements after they have been exposed by host. */
1483 virtio_rmb(vq->weak_barriers);
1485 last_used = vq->last_used_idx;
1486 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1487 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1489 if (unlikely(id >= vq->packed.vring.num)) {
1490 BAD_RING(vq, "id %u out of range\n", id);
1493 if (unlikely(!vq->packed.desc_state[id].data)) {
1494 BAD_RING(vq, "id %u is not a head!\n", id);
1497 if (vq->buflen && unlikely(*len > vq->buflen[id])) {
1498 BAD_RING(vq, "used len %d is larger than in buflen %u\n",
1499 *len, vq->buflen[id]);
1503 /* detach_buf_packed clears data, so grab it now. */
1504 ret = vq->packed.desc_state[id].data;
1505 detach_buf_packed(vq, id, ctx);
1507 vq->last_used_idx += vq->packed.desc_state[id].num;
1508 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1509 vq->last_used_idx -= vq->packed.vring.num;
1510 vq->packed.used_wrap_counter ^= 1;
1514 * If we expect an interrupt for the next entry, tell host
1515 * by writing event index and flush out the write before
1516 * the read in the next get_buf call.
1518 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1519 virtio_store_mb(vq->weak_barriers,
1520 &vq->packed.vring.driver->off_wrap,
1521 cpu_to_le16(vq->last_used_idx |
1522 (vq->packed.used_wrap_counter <<
1523 VRING_PACKED_EVENT_F_WRAP_CTR)));
1525 LAST_ADD_TIME_INVALID(vq);
1531 static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1533 struct vring_virtqueue *vq = to_vvq(_vq);
1535 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1536 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1537 vq->packed.vring.driver->flags =
1538 cpu_to_le16(vq->packed.event_flags_shadow);
1542 static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1544 struct vring_virtqueue *vq = to_vvq(_vq);
1549 * We optimistically turn back on interrupts, then check if there was more to do.
1554 vq->packed.vring.driver->off_wrap =
1555 cpu_to_le16(vq->last_used_idx |
1556 (vq->packed.used_wrap_counter <<
1557 VRING_PACKED_EVENT_F_WRAP_CTR));
1559 * We need to update event offset and event wrap
1560 * counter first before updating event flags.
1562 virtio_wmb(vq->weak_barriers);
1565 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1566 vq->packed.event_flags_shadow = vq->event ?
1567 VRING_PACKED_EVENT_FLAG_DESC :
1568 VRING_PACKED_EVENT_FLAG_ENABLE;
1569 vq->packed.vring.driver->flags =
1570 cpu_to_le16(vq->packed.event_flags_shadow);
1574 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1575 VRING_PACKED_EVENT_F_WRAP_CTR);
1578 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1580 struct vring_virtqueue *vq = to_vvq(_vq);
1584 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1585 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1587 return is_used_desc_packed(vq, used_idx, wrap_counter);
1590 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1592 struct vring_virtqueue *vq = to_vvq(_vq);
1593 u16 used_idx, wrap_counter;
1599 * We optimistically turn back on interrupts, then check if there was more to do.
1604 /* TODO: tune this threshold */
1605 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1606 wrap_counter = vq->packed.used_wrap_counter;
1608 used_idx = vq->last_used_idx + bufs;
1609 if (used_idx >= vq->packed.vring.num) {
1610 used_idx -= vq->packed.vring.num;
1614 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1615 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1618 * We need to update event offset and event wrap
1619 * counter first before updating event flags.
1621 virtio_wmb(vq->weak_barriers);
1624 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1625 vq->packed.event_flags_shadow = vq->event ?
1626 VRING_PACKED_EVENT_FLAG_DESC :
1627 VRING_PACKED_EVENT_FLAG_ENABLE;
1628 vq->packed.vring.driver->flags =
1629 cpu_to_le16(vq->packed.event_flags_shadow);
1633 * We need to update event suppression structure first
1634 * before re-checking for more used buffers.
1636 virtio_mb(vq->weak_barriers);
1638 if (is_used_desc_packed(vq,
1640 vq->packed.used_wrap_counter)) {
1649 static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1651 struct vring_virtqueue *vq = to_vvq(_vq);
1657 for (i = 0; i < vq->packed.vring.num; i++) {
1658 if (!vq->packed.desc_state[i].data)
1660 /* detach_buf clears data, so grab it now. */
1661 buf = vq->packed.desc_state[i].data;
1662 detach_buf_packed(vq, i, NULL);
1666 /* That should have freed everything. */
1667 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1673 static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
1676 struct vring_desc_extra *desc_extra;
1679 desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
1684 memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
1686 for (i = 0; i < num - 1; i++)
1687 desc_extra[i].next = i + 1;
1692 static struct virtqueue *vring_create_virtqueue_packed(
1695 unsigned int vring_align,
1696 struct virtio_device *vdev,
1698 bool may_reduce_num,
1700 bool (*notify)(struct virtqueue *),
1701 void (*callback)(struct virtqueue *),
1704 struct vring_virtqueue *vq;
1705 struct vring_packed_desc *ring;
1706 struct vring_packed_desc_event *driver, *device;
1707 struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
1708 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1709 size_t ring_size_in_bytes, event_size_in_bytes;
1711 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1713 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1715 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1719 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1721 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1722 &driver_event_dma_addr,
1723 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1727 device = vring_alloc_queue(vdev, event_size_in_bytes,
1728 &device_event_dma_addr,
1729 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1733 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1737 vq->vq.callback = callback;
1740 vq->vq.num_free = num;
1741 vq->vq.index = index;
1742 vq->we_own_ring = true;
1743 vq->notify = notify;
1744 vq->weak_barriers = weak_barriers;
1746 vq->last_used_idx = 0;
1747 vq->event_triggered = false;
1749 vq->packed_ring = true;
1750 vq->use_dma_api = vring_use_dma_api(vdev);
1753 vq->last_add_time_valid = false;
1756 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1758 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1760 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1761 vq->weak_barriers = false;
1763 vq->packed.ring_dma_addr = ring_dma_addr;
1764 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1765 vq->packed.device_event_dma_addr = device_event_dma_addr;
1767 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1768 vq->packed.event_size_in_bytes = event_size_in_bytes;
1770 vq->packed.vring.num = num;
1771 vq->packed.vring.desc = ring;
1772 vq->packed.vring.driver = driver;
1773 vq->packed.vring.device = device;
1775 vq->packed.next_avail_idx = 0;
1776 vq->packed.avail_wrap_counter = 1;
1777 vq->packed.used_wrap_counter = 1;
1778 vq->packed.event_flags_shadow = 0;
1779 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1781 vq->packed.desc_state = kmalloc_array(num,
1782 sizeof(struct vring_desc_state_packed),
1784 if (!vq->packed.desc_state)
1785 goto err_desc_state;
1787 memset(vq->packed.desc_state, 0,
1788 num * sizeof(struct vring_desc_state_packed));
1790 /* Put everything in free lists. */
1793 vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
1794 if (!vq->packed.desc_extra)
1795 goto err_desc_extra;
1797 if (!drv->suppress_used_validation || force_used_validation) {
1798 vq->buflen = kmalloc_array(num, sizeof(*vq->buflen),
1806 /* No callback? Tell other side not to bother us. */
1808 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1809 vq->packed.vring.driver->flags =
1810 cpu_to_le16(vq->packed.event_flags_shadow);
1813 spin_lock(&vdev->vqs_list_lock);
1814 list_add_tail(&vq->vq.list, &vdev->vqs);
1815 spin_unlock(&vdev->vqs_list_lock);
1819 kfree(vq->packed.desc_extra);
1821 kfree(vq->packed.desc_state);
1825 vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
1827 vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
1829 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1836 * Generic functions and exported symbols.
1839 static inline int virtqueue_add(struct virtqueue *_vq,
1840 struct scatterlist *sgs[],
1841 unsigned int total_sg,
1842 unsigned int out_sgs,
1843 unsigned int in_sgs,
1848 struct vring_virtqueue *vq = to_vvq(_vq);
1850 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1851 out_sgs, in_sgs, data, ctx, gfp) :
1852 virtqueue_add_split(_vq, sgs, total_sg,
1853 out_sgs, in_sgs, data, ctx, gfp);
1857 * virtqueue_add_sgs - expose buffers to other end
1858 * @_vq: the struct virtqueue we're talking about.
1859 * @sgs: array of terminated scatterlists.
1860 * @out_sgs: the number of scatterlists readable by other side
1861 * @in_sgs: the number of scatterlists which are writable (after readable ones)
1862 * @data: the token identifying the buffer.
1863 * @gfp: how to do memory allocations (if necessary).
1865 * Caller must ensure we don't call this with other virtqueue operations
1866 * at the same time (except where noted).
1868 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
1870 int virtqueue_add_sgs(struct virtqueue *_vq,
1871 struct scatterlist *sgs[],
1872 unsigned int out_sgs,
1873 unsigned int in_sgs,
1877 unsigned int i, total_sg = 0;
1879 /* Count them first. */
1880 for (i = 0; i < out_sgs + in_sgs; i++) {
1881 struct scatterlist *sg;
1883 for (sg = sgs[i]; sg; sg = sg_next(sg))
1886 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1889 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
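
/*
 * Illustrative sketch, not part of this file: a hypothetical request with a
 * device-readable header and a device-writable status byte.  Device-readable
 * scatterlists come first in sgs[], so out_sgs = 1 and in_sgs = 1 here; the
 * request pointer is the token later returned by virtqueue_get_buf().
 */
struct example_req {
	u8 hdr[16];	/* hypothetical, read by the device */
	u8 status;	/* hypothetical, written by the device */
};

static int __maybe_unused example_queue_req(struct virtqueue *vq,
					    struct example_req *req)
{
	struct scatterlist hdr, status;
	struct scatterlist *sgs[2] = { &hdr, &status };

	sg_init_one(&hdr, req->hdr, sizeof(req->hdr));
	sg_init_one(&status, &req->status, sizeof(req->status));

	/* Device-readable entries first, then device-writable ones. */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}
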
1892 * virtqueue_add_outbuf - expose output buffers to other end
1893 * @vq: the struct virtqueue we're talking about.
1894 * @sg: scatterlist (must be well-formed and terminated!)
1895 * @num: the number of entries in @sg readable by other side
1896 * @data: the token identifying the buffer.
1897 * @gfp: how to do memory allocations (if necessary).
1899 * Caller must ensure we don't call this with other virtqueue operations
1900 * at the same time (except where noted).
1902 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
1904 int virtqueue_add_outbuf(struct virtqueue *vq,
1905 struct scatterlist *sg, unsigned int num,
1909 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1911 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
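
/*
 * Illustrative sketch, not part of this file: the common single-buffer
 * transmit pattern.  The buffer and length are hypothetical driver data;
 * the buffer also serves as the completion token.
 */
static int __maybe_unused example_send(struct virtqueue *vq, void *buf,
				       unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;	/* usually -ENOSPC: the ring is full */

	virtqueue_kick(vq);
	return 0;
}
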
1914 * virtqueue_add_inbuf - expose input buffers to other end
1915 * @vq: the struct virtqueue we're talking about.
1916 * @sg: scatterlist (must be well-formed and terminated!)
1917 * @num: the number of entries in @sg writable by other side
1918 * @data: the token identifying the buffer.
1919 * @gfp: how to do memory allocations (if necessary).
1921 * Caller must ensure we don't call this with other virtqueue operations
1922 * at the same time (except where noted).
1924 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
1926 int virtqueue_add_inbuf(struct virtqueue *vq,
1927 struct scatterlist *sg, unsigned int num,
1931 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1933 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
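
/*
 * Illustrative sketch, not part of this file: receive queues are usually
 * kept full by posting empty, device-writable buffers until the ring has no
 * more room.  The PAGE_SIZE buffers here are a hypothetical choice.
 */
static void __maybe_unused example_fill_rx(struct virtqueue *vq, gfp_t gfp)
{
	struct scatterlist sg;
	void *buf;

	for (;;) {
		buf = kmalloc(PAGE_SIZE, gfp);
		if (!buf)
			break;

		sg_init_one(&sg, buf, PAGE_SIZE);
		if (virtqueue_add_inbuf(vq, &sg, 1, buf, gfp)) {
			kfree(buf);	/* ring full, stop refilling */
			break;
		}
	}

	virtqueue_kick(vq);
}
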
1936 * virtqueue_add_inbuf_ctx - expose input buffers to other end
1937 * @vq: the struct virtqueue we're talking about.
1938 * @sg: scatterlist (must be well-formed and terminated!)
1939 * @num: the number of entries in @sg writable by other side
1940 * @data: the token identifying the buffer.
1941 * @ctx: extra context for the token
1942 * @gfp: how to do memory allocations (if necessary).
1944 * Caller must ensure we don't call this with other virtqueue operations
1945 * at the same time (except where noted).
1947 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
1949 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1950 struct scatterlist *sg, unsigned int num,
1955 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1957 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1960 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1961 * @_vq: the struct virtqueue
1963 * Instead of virtqueue_kick(), you can do:
1964 * if (virtqueue_kick_prepare(vq))
1965 * virtqueue_notify(vq);
1967 * This is sometimes useful because the virtqueue_kick_prepare() needs
1968 * to be serialized, but the actual virtqueue_notify() call does not.
1970 bool virtqueue_kick_prepare(struct virtqueue *_vq)
1972 struct vring_virtqueue *vq = to_vvq(_vq);
1974 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1975 virtqueue_kick_prepare_split(_vq);
1977 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
1980 * virtqueue_notify - second half of split virtqueue_kick call.
1981 * @_vq: the struct virtqueue
1983 * This does not need to be serialized.
1985 * Returns false if host notify failed or queue is broken, otherwise true.
1987 bool virtqueue_notify(struct virtqueue *_vq)
1989 struct vring_virtqueue *vq = to_vvq(_vq);
1991 if (unlikely(vq->broken))
1994 /* Prod other side to tell it about changes. */
1995 if (!vq->notify(_vq)) {
2001 EXPORT_SYMBOL_GPL(virtqueue_notify);
2004 * virtqueue_kick - update after add_buf
2005 * @vq: the struct virtqueue
2007 * After one or more virtqueue_add_* calls, invoke this to kick the other side.
2010 * Caller must ensure we don't call this with other virtqueue
2011 * operations at the same time (except where noted).
2013 * Returns false if kick failed, otherwise true.
2015 bool virtqueue_kick(struct virtqueue *vq)
2017 if (virtqueue_kick_prepare(vq))
2018 return virtqueue_notify(vq);
2021 EXPORT_SYMBOL_GPL(virtqueue_kick);
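
/*
 * Illustrative sketch, not part of this file: drivers that add buffers under
 * a spinlock often split the kick so that the (possibly slow) notification
 * happens outside the lock.  The lock itself is hypothetical driver state.
 */
static void __maybe_unused example_add_and_kick(struct virtqueue *vq,
						spinlock_t *lock)
{
	bool kick;

	spin_lock(lock);
	/* ... one or more virtqueue_add_*() calls go here ... */
	kick = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	if (kick)
		virtqueue_notify(vq);
}
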
2024 * virtqueue_get_buf_ctx - get the next used buffer
2025 * @_vq: the struct virtqueue we're talking about.
2026 * @len: the length written into the buffer
2027 * @ctx: extra context for the token
2029 * If the device wrote data into the buffer, @len will be set to the
2030 * amount written. This means you don't need to clear the buffer
2031 * beforehand to ensure there's no data leakage in the case of short writes.
2034 * Caller must ensure we don't call this with other virtqueue
2035 * operations at the same time (except where noted).
2037 * Returns NULL if there are no used buffers, or the "data" token
2038 * handed to virtqueue_add_*().
2040 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
2043 struct vring_virtqueue *vq = to_vvq(_vq);
2045 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
2046 virtqueue_get_buf_ctx_split(_vq, len, ctx);
2048 EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
2050 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
2052 return virtqueue_get_buf_ctx(_vq, len, NULL);
2054 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
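
/*
 * Illustrative sketch, not part of this file: a completion path normally
 * drains every used buffer in one pass.  The returned pointer is the token
 * passed to virtqueue_add_*(); len is how many bytes the device wrote.
 * Freeing with kfree() assumes the hypothetical buffers were kmalloc()ed.
 */
static void __maybe_unused example_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
		kfree(buf);
}
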
2056 * virtqueue_disable_cb - disable callbacks
2057 * @_vq: the struct virtqueue we're talking about.
2059 * Note that this is not necessarily synchronous, hence unreliable and only
2060 * useful as an optimization.
2062 * Unlike other operations, this need not be serialized.
2064 void virtqueue_disable_cb(struct virtqueue *_vq)
2066 struct vring_virtqueue *vq = to_vvq(_vq);
2068 /* If device triggered an event already it won't trigger one again:
2069 * no need to disable. */
2071 if (vq->event_triggered)
2074 if (vq->packed_ring)
2075 virtqueue_disable_cb_packed(_vq);
2077 virtqueue_disable_cb_split(_vq);
2079 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2082 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2083 * @_vq: the struct virtqueue we're talking about.
2085 * This re-enables callbacks; it returns current queue state
2086 * in an opaque unsigned value. This value should be later tested by
2087 * virtqueue_poll, to detect a possible race between the driver checking for
2088 * more work, and enabling callbacks.
2090 * Caller must ensure we don't call this with other virtqueue
2091 * operations at the same time (except where noted).
2093 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2095 struct vring_virtqueue *vq = to_vvq(_vq);
2097 if (vq->event_triggered)
2098 vq->event_triggered = false;
2100 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
2101 virtqueue_enable_cb_prepare_split(_vq);
2103 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2106 * virtqueue_poll - query pending used buffers
2107 * @_vq: the struct virtqueue we're talking about.
2108 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2110 * Returns "true" if there are pending used buffers in the queue.
2112 * This does not need to be serialized.
2114 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
2116 struct vring_virtqueue *vq = to_vvq(_vq);
2118 if (unlikely(vq->broken))
2121 virtio_mb(vq->weak_barriers);
2122 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
2123 virtqueue_poll_split(_vq, last_used_idx);
2125 EXPORT_SYMBOL_GPL(virtqueue_poll);
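
/*
 * Illustrative sketch, not part of this file: the race-free way to stop
 * polling is to re-enable callbacks with virtqueue_enable_cb_prepare() and
 * then let virtqueue_poll() catch buffers that arrived in the meantime.
 */
static bool __maybe_unused example_finish_polling(struct virtqueue *vq)
{
	unsigned int opaque = virtqueue_enable_cb_prepare(vq);

	if (unlikely(virtqueue_poll(vq, opaque))) {
		/* More work slipped in: keep callbacks off and poll again. */
		virtqueue_disable_cb(vq);
		return false;
	}

	return true;	/* idle: the next used buffer will raise a callback */
}
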
2128 * virtqueue_enable_cb - restart callbacks after disable_cb.
2129 * @_vq: the struct virtqueue we're talking about.
2131 * This re-enables callbacks; it returns "false" if there are pending
2132 * buffers in the queue, to detect a possible race between the driver
2133 * checking for more work, and enabling callbacks.
2135 * Caller must ensure we don't call this with other virtqueue
2136 * operations at the same time (except where noted).
2138 bool virtqueue_enable_cb(struct virtqueue *_vq)
2140 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
2142 return !virtqueue_poll(_vq, last_used_idx);
2144 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
2147 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2148 * @_vq: the struct virtqueue we're talking about.
2150 * This re-enables callbacks but hints to the other side to delay
2151 * interrupts until most of the available buffers have been processed;
2152 * it returns "false" if there are many pending buffers in the queue,
2153 * to detect a possible race between the driver checking for more work,
2154 * and enabling callbacks.
2156 * Caller must ensure we don't call this with other virtqueue
2157 * operations at the same time (except where noted).
2159 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2161 struct vring_virtqueue *vq = to_vvq(_vq);
2163 if (vq->event_triggered)
2164 vq->event_triggered = false;
2166 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
2167 virtqueue_enable_cb_delayed_split(_vq);
2169 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2172 * virtqueue_detach_unused_buf - detach first unused buffer
2173 * @_vq: the struct virtqueue we're talking about.
2175 * Returns NULL or the "data" token handed to virtqueue_add_*().
2176 * This is not valid on an active queue; it is useful only for device shutdown.
2179 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2181 struct vring_virtqueue *vq = to_vvq(_vq);
2183 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
2184 virtqueue_detach_unused_buf_split(_vq);
2186 EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
2188 static inline bool more_used(const struct vring_virtqueue *vq)
2190 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2193 irqreturn_t vring_interrupt(int irq, void *_vq)
2195 struct vring_virtqueue *vq = to_vvq(_vq);
2197 if (!more_used(vq)) {
2198 pr_debug("virtqueue interrupt with no work for %p\n", vq);
2202 if (unlikely(vq->broken))
2205 /* Just a hint for performance: so it's ok that this can be racy! */
2207 vq->event_triggered = true;
2209 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
2210 if (vq->vq.callback)
2211 vq->vq.callback(&vq->vq);
2215 EXPORT_SYMBOL_GPL(vring_interrupt);
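
/*
 * Illustrative sketch, not part of this file (and assuming
 * <linux/interrupt.h> for request_irq()): transport code can wire a
 * per-queue vector straight to vring_interrupt(), which then invokes the
 * callback the driver registered for the virtqueue.  The irq number and
 * name are hypothetical.
 */
static int __maybe_unused example_request_vq_irq(int irq, struct virtqueue *vq)
{
	return request_irq(irq, vring_interrupt, 0, "example-vq", vq);
}
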
2217 /* Only available for split ring */
2218 struct virtqueue *__vring_new_virtqueue(unsigned int index,
2220 struct virtio_device *vdev,
2223 bool (*notify)(struct virtqueue *),
2224 void (*callback)(struct virtqueue *),
2227 struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
2228 struct vring_virtqueue *vq;
2230 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2233 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
2237 vq->packed_ring = false;
2238 vq->vq.callback = callback;
2241 vq->vq.num_free = vring.num;
2242 vq->vq.index = index;
2243 vq->we_own_ring = false;
2244 vq->notify = notify;
2245 vq->weak_barriers = weak_barriers;
2247 vq->last_used_idx = 0;
2248 vq->event_triggered = false;
2250 vq->use_dma_api = vring_use_dma_api(vdev);
2253 vq->last_add_time_valid = false;
2256 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2258 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2260 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2261 vq->weak_barriers = false;
2263 vq->split.queue_dma_addr = 0;
2264 vq->split.queue_size_in_bytes = 0;
2266 vq->split.vring = vring;
2267 vq->split.avail_flags_shadow = 0;
2268 vq->split.avail_idx_shadow = 0;
2270 /* No callback? Tell other side not to bother us. */
2272 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
2274 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2275 vq->split.avail_flags_shadow);
2278 vq->split.desc_state = kmalloc_array(vring.num,
2279 sizeof(struct vring_desc_state_split), GFP_KERNEL);
2280 if (!vq->split.desc_state)
2283 vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
2284 if (!vq->split.desc_extra)
2287 if (!drv->suppress_used_validation || force_used_validation) {
2288 vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen),
2296 /* Put everything in free lists. */
2298 memset(vq->split.desc_state, 0, vring.num *
2299 sizeof(struct vring_desc_state_split));
2301 spin_lock(&vdev->vqs_list_lock);
2302 list_add_tail(&vq->vq.list, &vdev->vqs);
2303 spin_unlock(&vdev->vqs_list_lock);
2307 kfree(vq->split.desc_extra);
2309 kfree(vq->split.desc_state);
2314 EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
2316 struct virtqueue *vring_create_virtqueue(
2319 unsigned int vring_align,
2320 struct virtio_device *vdev,
2322 bool may_reduce_num,
2324 bool (*notify)(struct virtqueue *),
2325 void (*callback)(struct virtqueue *),
2329 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2330 return vring_create_virtqueue_packed(index, num, vring_align,
2331 vdev, weak_barriers, may_reduce_num,
2332 context, notify, callback, name);
2334 return vring_create_virtqueue_split(index, num, vring_align,
2335 vdev, weak_barriers, may_reduce_num,
2336 context, notify, callback, name);
2338 EXPORT_SYMBOL_GPL(vring_create_virtqueue);
2340 /* Only available for split ring */
2341 struct virtqueue *vring_new_virtqueue(unsigned int index,
2343 unsigned int vring_align,
2344 struct virtio_device *vdev,
2348 bool (*notify)(struct virtqueue *vq),
2349 void (*callback)(struct virtqueue *vq),
2354 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2357 vring_init(&vring, num, pages, vring_align);
2358 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
2359 notify, callback, name);
2361 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
2363 void vring_del_virtqueue(struct virtqueue *_vq)
2365 struct vring_virtqueue *vq = to_vvq(_vq);
2367 spin_lock(&vq->vq.vdev->vqs_list_lock);
2368 list_del(&_vq->list);
2369 spin_unlock(&vq->vq.vdev->vqs_list_lock);
2371 if (vq->we_own_ring) {
2372 if (vq->packed_ring) {
2373 vring_free_queue(vq->vq.vdev,
2374 vq->packed.ring_size_in_bytes,
2375 vq->packed.vring.desc,
2376 vq->packed.ring_dma_addr);
2378 vring_free_queue(vq->vq.vdev,
2379 vq->packed.event_size_in_bytes,
2380 vq->packed.vring.driver,
2381 vq->packed.driver_event_dma_addr);
2383 vring_free_queue(vq->vq.vdev,
2384 vq->packed.event_size_in_bytes,
2385 vq->packed.vring.device,
2386 vq->packed.device_event_dma_addr);
2388 kfree(vq->packed.desc_state);
2389 kfree(vq->packed.desc_extra);
2391 vring_free_queue(vq->vq.vdev,
2392 vq->split.queue_size_in_bytes,
2393 vq->split.vring.desc,
2394 vq->split.queue_dma_addr);
2397 if (!vq->packed_ring) {
2398 kfree(vq->split.desc_state);
2399 kfree(vq->split.desc_extra);
2403 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
2405 /* Manipulates transport-specific feature bits. */
2406 void vring_transport_features(struct virtio_device *vdev)
2410 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2412 case VIRTIO_RING_F_INDIRECT_DESC:
2414 case VIRTIO_RING_F_EVENT_IDX:
2416 case VIRTIO_F_VERSION_1:
2418 case VIRTIO_F_ACCESS_PLATFORM:
2420 case VIRTIO_F_RING_PACKED:
2422 case VIRTIO_F_ORDER_PLATFORM:
2425 /* We don't understand this bit. */
2426 __virtio_clear_bit(vdev, i);
2430 EXPORT_SYMBOL_GPL(vring_transport_features);
2433 * virtqueue_get_vring_size - return the size of the virtqueue's vring
2434 * @_vq: the struct virtqueue containing the vring of interest.
2436 * Returns the size of the vring. This is mainly used for boasting to
2437 * userspace. Unlike other operations, this need not be serialized.
2439 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2442 struct vring_virtqueue *vq = to_vvq(_vq);
2444 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
2446 EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
2448 bool virtqueue_is_broken(struct virtqueue *_vq)
2450 struct vring_virtqueue *vq = to_vvq(_vq);
2452 return READ_ONCE(vq->broken);
2454 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2457 * This should prevent the device from being used, allowing drivers to
2458 * recover. You may need to grab appropriate locks to flush.
2460 void virtio_break_device(struct virtio_device *dev)
2462 struct virtqueue *_vq;
2464 spin_lock(&dev->vqs_list_lock);
2465 list_for_each_entry(_vq, &dev->vqs, list) {
2466 struct vring_virtqueue *vq = to_vvq(_vq);
2468 /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2469 WRITE_ONCE(vq->broken, true);
2471 spin_unlock(&dev->vqs_list_lock);
2473 EXPORT_SYMBOL_GPL(virtio_break_device);
2475 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
2477 struct vring_virtqueue *vq = to_vvq(_vq);
2479 BUG_ON(!vq->we_own_ring);
2481 if (vq->packed_ring)
2482 return vq->packed.ring_dma_addr;
2484 return vq->split.queue_dma_addr;
2486 EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
2488 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
2490 struct vring_virtqueue *vq = to_vvq(_vq);
2492 BUG_ON(!vq->we_own_ring);
2494 if (vq->packed_ring)
2495 return vq->packed.driver_event_dma_addr;
2497 return vq->split.queue_dma_addr +
2498 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
2500 EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
2502 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2504 struct vring_virtqueue *vq = to_vvq(_vq);
2506 BUG_ON(!vq->we_own_ring);
2508 if (vq->packed_ring)
2509 return vq->packed.device_event_dma_addr;
2511 return vq->split.queue_dma_addr +
2512 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
2514 EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
2516 /* Only available for split ring */
2517 const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2519 return &to_vvq(vq)->split.vring;
2521 EXPORT_SYMBOL_GPL(virtqueue_get_vring);
2523 MODULE_LICENSE("GPL");