// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};
struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}
size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
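
/*
 * Usage sketch (editorial addition, not from the original source): a
 * block driver can use this to cap its segment size at probe time.
 * "q" is a hypothetical request queue:
 *
 *	unsigned int max_size = virtio_max_dma_size(vdev);
 *
 *	blk_queue_max_segment_size(q, max_size);
 */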
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
		       virtio32_to_cpu(vq->vq.vdev, desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	if (!vq->use_dma_api)
		goto out;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}
static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}
static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}
static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
/*
 * Packed ring specific functions - *_packed().
 */

static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra *extra)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = extra->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra->addr, extra->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra->addr, extra->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       le64_to_cpu(desc->addr),
		       le32_to_cpu(desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}
static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_extra[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;
	int err;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(_vq, total_sg)) {
		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
						    in_sgs, data, gfp);
		if (err != -ENOMEM) {
			END_USE(vq);
			return err;
		}

		/* fall back on direct */
	}

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
			curr = vq->packed.desc_extra[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				i = 0;
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;
			}
		}
	}

	if (i < head)
		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	/* Store token. */
	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;
	curr = vq->free_head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
		curr = vq->packed.desc_extra[curr].next;
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}

	END_USE(vq);
	return -EIO;
}
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;
	vq->num_added = 0;

	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
		goto out;
	}

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
out:
	END_USE(vq);
	return needs_kick;
}
static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */
	state->data = NULL;

	vq->packed.desc_extra[state->last].next = vq->free_head;
	vq->free_head = id;
	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_extra_packed(vq,
						 &vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_extra[curr].next;
		}
	}

	if (vq->indirect) {
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;
		if (!desc)
			return;

		if (vq->use_dma_api) {
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
					i++)
				vring_unmap_desc_packed(vq, &desc[i]);
		}
		kfree(desc);
		state->indir_desc = NULL;
	} else if (ctx) {
		*ctx = state->indir_desc;
	}
}

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	bool avail, used;
	u16 flags;

	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
	return is_used_desc_packed(vq, vq->last_used_idx,
			vq->packed.used_wrap_counter);
}
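
/*
 * Editorial note (not in the original source): a descriptor slot counts
 * as "used" when its AVAIL and USED flag bits are equal and both match
 * the caller's wrap counter.  A worked example, assuming a 4-entry ring:
 * the driver starts with used_wrap_counter = 1, and the device completes
 * entries 0..3 with AVAIL == USED == 1, so each matches.  Once the
 * driver consumes entry 3, last_used_idx wraps to 0 and the counter
 * flips to 0, so the stale 1/1 entries no longer read as used.
 */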
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  unsigned int *len,
					  void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id;
	void *ret;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_packed(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = vq->last_used_idx;
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
		return NULL;
	}
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);
		return NULL;
	}

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	vq->last_used_idx += vq->packed.desc_state[id].num;
	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
		vq->last_used_idx -= vq->packed.vring.num;
		vq->packed.used_wrap_counter ^= 1;
	}

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx |
					(vq->packed.used_wrap_counter <<
					 VRING_PACKED_EVENT_F_WRAP_CTR)));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}
}
static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx |
				(vq->packed.used_wrap_counter <<
				 VRING_PACKED_EVENT_F_WRAP_CTR));
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	END_USE(vq);
	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
			VRING_PACKED_EVENT_F_WRAP_CTR);
}
static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	bool wrap_counter;
	u16 used_idx;

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	return is_used_desc_packed(vq, used_idx, wrap_counter);
}
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter;
	u16 bufs;

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		wrap_counter = vq->packed.used_wrap_counter;

		used_idx = vq->last_used_idx + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;
			wrap_counter ^= 1;
		}

		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	/*
	 * We need to update event suppression structure first
	 * before re-checking for more used buffers.
	 */
	virtio_mb(vq->weak_barriers);

	if (is_used_desc_packed(vq,
				vq->last_used_idx,
				vq->packed.used_wrap_counter)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->packed.vring.num; i++) {
		if (!vq->packed.desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->packed.desc_state[i].data;
		detach_buf_packed(vq, i, NULL);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->packed.vring.num);

	END_USE(vq);
	return NULL;
}
static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
						       unsigned int num)
{
	struct vring_desc_extra *desc_extra;
	unsigned int i;

	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
				   GFP_KERNEL);
	if (!desc_extra)
		return NULL;

	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));

	for (i = 0; i < num - 1; i++)
		desc_extra[i].next = i + 1;

	return desc_extra;
}
static struct virtqueue *vring_create_virtqueue_packed(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct vring_virtqueue *vq;
	struct vring_packed_desc *ring;
	struct vring_packed_desc_event *driver, *device;
	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
	size_t ring_size_in_bytes, event_size_in_bytes;

	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);

	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
				 &ring_dma_addr,
				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!ring)
		goto err_ring;

	event_size_in_bytes = sizeof(struct vring_packed_desc_event);

	driver = vring_alloc_queue(vdev, event_size_in_bytes,
				   &driver_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!driver)
		goto err_driver;

	device = vring_alloc_queue(vdev, event_size_in_bytes,
				   &device_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!device)
		goto err_device;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		goto err_vq;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->we_own_ring = true;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->event_triggered = false;
	vq->num_added = 0;
	vq->packed_ring = true;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->packed.ring_dma_addr = ring_dma_addr;
	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
	vq->packed.device_event_dma_addr = device_event_dma_addr;

	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
	vq->packed.event_size_in_bytes = event_size_in_bytes;

	vq->packed.vring.num = num;
	vq->packed.vring.desc = ring;
	vq->packed.vring.driver = driver;
	vq->packed.vring.device = device;

	vq->packed.next_avail_idx = 0;
	vq->packed.avail_wrap_counter = 1;
	vq->packed.used_wrap_counter = 1;
	vq->packed.event_flags_shadow = 0;
	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

	vq->packed.desc_state = kmalloc_array(num,
			sizeof(struct vring_desc_state_packed),
			GFP_KERNEL);
	if (!vq->packed.desc_state)
		goto err_desc_state;

	memset(vq->packed.desc_state, 0,
		num * sizeof(struct vring_desc_state_packed));

	/* Put everything in free lists. */
	vq->free_head = 0;

	vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
	if (!vq->packed.desc_extra)
		goto err_desc_extra;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;

err_desc_extra:
	kfree(vq->packed.desc_state);
err_desc_state:
	kfree(vq);
err_vq:
	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
	return NULL;
}
/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp) :
				 virtqueue_add_split(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp);
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
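
/*
 * Usage sketch (editorial addition, not from the original source):
 * queueing one device-readable and one device-writable buffer.  "req",
 * "resp" and "token" are hypothetical driver-owned objects; error
 * handling is elided:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sgs[0] = &hdr;
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	sgs[1] = &status;
 *
 *	if (!virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC))
 *		virtqueue_kick(vq);
 */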
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
				 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
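
/*
 * Usage sketch (editorial addition, not from the original source): a
 * typical completion loop in a driver callback.  Each returned token is
 * the "data" pointer that was passed to virtqueue_add_*();
 * "complete_request" is a hypothetical helper:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */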
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* If device triggered an event already it won't trigger one again:
	 * no need to disable.
	 */
	if (vq->event_triggered)
		return;

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
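
/*
 * Usage sketch (editorial addition, not from the original source): the
 * canonical race-free consume loop built from the primitives above.
 * The re-check via virtqueue_enable_cb() closes the window where a
 * buffer becomes used between the last get_buf and re-enabling
 * callbacks; "process" is a hypothetical helper:
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */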
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
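
/*
 * Usage sketch (editorial addition, not from the original source):
 * draining a queue in a remove() path, after the device has been reset
 * so the ring is quiescent.  This assumes the tokens are kmalloc'ed
 * buffers owned by the driver:
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */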
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	/* Just a hint for performance: so it's ok that this can be racy! */
	if (vq->event)
		vq->event_triggered = true;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	struct vring_virtqueue *vq;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->event_triggered = false;
	vq->num_added = 0;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->split.queue_dma_addr = 0;
	vq->split.queue_size_in_bytes = 0;

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state)
		goto err_state;

	vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
	if (!vq->split.desc_extra)
		goto err_extra;

	/* Put everything in free lists. */
	vq->free_head = 0;

	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;

err_extra:
	kfree(vq->split.desc_state);
err_state:
	kfree(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
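
/*
 * Usage sketch (editorial addition, not from the original source): how
 * a transport might create a 256-entry queue.  "mydev_notify" and
 * "mydev_callback" are hypothetical:
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    mydev_notify, mydev_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 */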
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr);

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr);
		}
	}
	if (!vq->packed_ring) {
		kfree(vq->split.desc_state);
		kfree(vq->split.desc_extra);
	}
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_ACCESS_PLATFORM:
			break;
		case VIRTIO_F_RING_PACKED:
			break;
		case VIRTIO_F_ORDER_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, true);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");