1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Virtio ring implementation.
4 * Copyright 2007 Rusty Russell IBM Corporation
6 #include <linux/virtio.h>
7 #include <linux/virtio_ring.h>
8 #include <linux/virtio_config.h>
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/hrtimer.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/spinlock.h>
18 /* For development, we want to crash whenever the ring is screwed. */
19 #define BAD_RING(_vq, fmt, args...) \
21 dev_err(&(_vq)->vq.vdev->dev, \
22 "%s:"fmt, (_vq)->vq.name, ##args); \
25 /* Caller is supposed to guarantee no reentry. */
26 #define START_USE(_vq) \
29 panic("%s:in_use = %i\n", \
30 (_vq)->vq.name, (_vq)->in_use); \
31 (_vq)->in_use = __LINE__; \
33 #define END_USE(_vq) \
34 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
35 #define LAST_ADD_TIME_UPDATE(_vq) \
37 ktime_t now = ktime_get(); \
39 /* No kick or get, with 0.1 seconds between? Warn. */ \
40 if ((_vq)->last_add_time_valid) \
41 WARN_ON(ktime_to_ms(ktime_sub(now, \
42 (_vq)->last_add_time)) > 100); \
43 (_vq)->last_add_time = now; \
44 (_vq)->last_add_time_valid = true; \
46 #define LAST_ADD_TIME_CHECK(_vq) \
48 if ((_vq)->last_add_time_valid) { \
49 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
50 (_vq)->last_add_time)) > 100); \
53 #define LAST_ADD_TIME_INVALID(_vq) \
54 ((_vq)->last_add_time_valid = false)
56 #define BAD_RING(_vq, fmt, args...) \
58 dev_err(&_vq->vq.vdev->dev, \
59 "%s:"fmt, (_vq)->vq.name, ##args); \
60 (_vq)->broken = true; \
64 #define LAST_ADD_TIME_UPDATE(vq)
65 #define LAST_ADD_TIME_CHECK(vq)
66 #define LAST_ADD_TIME_INVALID(vq)
69 struct vring_desc_state_split {
70 void *data; /* Data for callback. */
71 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
74 struct vring_desc_state_packed {
75 void *data; /* Data for callback. */
76 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
77 u16 num; /* Descriptor list length. */
78 u16 last; /* The last desc state in a list. */
81 struct vring_desc_extra {
82 dma_addr_t addr; /* Descriptor DMA addr. */
83 u32 len; /* Descriptor length. */
84 u16 flags; /* Descriptor flags. */
85 u16 next; /* The next desc state in a list. */
88 struct vring_virtqueue {
91 /* Is this a packed ring? */
94 /* Is DMA API used? */
97 /* Can we use weak barriers? */
100 /* Other side has made a mess, don't try any more. */
103 /* Host supports indirect buffers */
106 /* Host publishes avail event idx */
109 /* Head of free buffer list. */
110 unsigned int free_head;
111 /* Number we've added since last sync. */
112 unsigned int num_added;
114 /* Last used index we've seen. */
117 /* Hint for event idx: already triggered, no need to disable. */
118 bool event_triggered;
121 /* Available for split ring */
123 /* Actual memory layout for this queue. */
126 /* Last written value to avail->flags */
127 u16 avail_flags_shadow;
130 * Last written value to avail->idx in
133 u16 avail_idx_shadow;
135 /* Per-descriptor state. */
136 struct vring_desc_state_split *desc_state;
137 struct vring_desc_extra *desc_extra;
139 /* DMA address and size information */
140 dma_addr_t queue_dma_addr;
141 size_t queue_size_in_bytes;
144 /* Available for packed ring */
146 /* Actual memory layout for this queue. */
149 struct vring_packed_desc *desc;
150 struct vring_packed_desc_event *driver;
151 struct vring_packed_desc_event *device;
154 /* Driver ring wrap counter. */
155 bool avail_wrap_counter;
157 /* Device ring wrap counter. */
158 bool used_wrap_counter;
160 /* Avail used flags. */
161 u16 avail_used_flags;
163 /* Index of the next avail descriptor. */
167 * Last written value to driver->flags in
170 u16 event_flags_shadow;
172 /* Per-descriptor state. */
173 struct vring_desc_state_packed *desc_state;
174 struct vring_desc_extra *desc_extra;
176 /* DMA address and size information */
177 dma_addr_t ring_dma_addr;
178 dma_addr_t driver_event_dma_addr;
179 dma_addr_t device_event_dma_addr;
180 size_t ring_size_in_bytes;
181 size_t event_size_in_bytes;
185 /* How to notify other side. FIXME: commonalize hcalls! */
186 bool (*notify)(struct virtqueue *vq);
188 /* DMA, allocation, and size information */
192 /* They're supposed to lock for us. */
195 /* Figure out if their kicks are too delayed. */
196 bool last_add_time_valid;
197 ktime_t last_add_time;
206 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
208 static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
209 unsigned int total_sg)
212 * If the host supports indirect descriptor tables, and we have multiple
213 * buffers, then go indirect. FIXME: tune this threshold
215 return (vq->indirect && total_sg > 1 && vq->vq.num_free);
219 * Modern virtio devices have feature bits to specify whether they need a
220 * quirk and bypass the IOMMU. If not there, just use the DMA API.
222 * If there, the interaction between virtio and DMA API is messy.
224 * On most systems with virtio, physical addresses match bus addresses,
225 * and it doesn't particularly matter whether we use the DMA API.
227 * On some systems, including Xen and any system with a physical device
228 * that speaks virtio behind a physical IOMMU, we must use the DMA API
229 * for virtio DMA to work at all.
231 * On other systems, including SPARC and PPC64, virtio-pci devices are
232 * enumerated as though they are behind an IOMMU, but the virtio host
233 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
234 * there or somehow map everything as the identity.
236 * For the time being, we preserve historic behavior and bypass the DMA
239 * TODO: install a per-device DMA ops structure that does the right thing
240 * taking into account all the above quirks, and use the DMA API
241 * unconditionally on data path.
244 static bool vring_use_dma_api(struct virtio_device *vdev)
246 if (!virtio_has_dma_quirk(vdev))
249 /* Otherwise, we are left to guess. */
251 * In theory, it's possible to have a buggy QEMU-supplied
252 * emulated Q35 IOMMU and Xen enabled at the same time. On
253 * such a configuration, virtio has never worked and will
254 * not work without an even larger kludge. Instead, enable
255 * the DMA API if we're a Xen guest, which at least allows
256 * all of the sensible Xen configurations to work correctly.
264 size_t virtio_max_dma_size(struct virtio_device *vdev)
266 size_t max_segment_size = SIZE_MAX;
268 if (vring_use_dma_api(vdev))
269 max_segment_size = dma_max_mapping_size(vdev->dev.parent);
271 return max_segment_size;
273 EXPORT_SYMBOL_GPL(virtio_max_dma_size);
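/*
 * Illustrative usage sketch (not part of this file): a driver that
 * advertises a maximum DMA segment size can clamp it against what the
 * DMA layer allows.  MY_PREFERRED_SEG_SIZE is a hypothetical constant.
 *
 *	size_t max_seg = min_t(size_t, MY_PREFERRED_SEG_SIZE,
 *			       virtio_max_dma_size(vdev));
 */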
275 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
276 dma_addr_t *dma_handle, gfp_t flag)
278 if (vring_use_dma_api(vdev)) {
279 return dma_alloc_coherent(vdev->dev.parent, size,
282 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
285 phys_addr_t phys_addr = virt_to_phys(queue);
286 *dma_handle = (dma_addr_t)phys_addr;
289 * Sanity check: make sure we didn't truncate
290 * the address. The only arches I can find that
291 * have 64-bit phys_addr_t but 32-bit dma_addr_t
292 * are certain non-highmem MIPS and x86
293 * configurations, but these configurations
294 * should never allocate physical pages above 32
295 * bits, so this is fine. Just in case, throw a
296 * warning and abort if we end up with an
297 * unrepresentable address.
299 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
300 free_pages_exact(queue, PAGE_ALIGN(size));
308 static void vring_free_queue(struct virtio_device *vdev, size_t size,
309 void *queue, dma_addr_t dma_handle)
311 if (vring_use_dma_api(vdev))
312 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
314 free_pages_exact(queue, PAGE_ALIGN(size));
318 * The DMA ops on various arches are rather gnarly right now, and
319 * making all of the arch DMA ops work on the vring device itself
320 * is a mess. For now, we use the parent device for DMA ops.
322 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
324 return vq->vq.vdev->dev.parent;
327 /* Map one sg entry. */
328 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
329 struct scatterlist *sg,
330 enum dma_data_direction direction)
332 if (!vq->use_dma_api)
333 return (dma_addr_t)sg_phys(sg);
336 * We can't use dma_map_sg, because we don't use scatterlists in
337 * the way it expects (we don't guarantee that the scatterlist
338 * will exist for the lifetime of the mapping).
340 return dma_map_page(vring_dma_dev(vq),
341 sg_page(sg), sg->offset, sg->length,
345 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
346 void *cpu_addr, size_t size,
347 enum dma_data_direction direction)
349 if (!vq->use_dma_api)
350 return (dma_addr_t)virt_to_phys(cpu_addr);
352 return dma_map_single(vring_dma_dev(vq),
353 cpu_addr, size, direction);
356 static int vring_mapping_error(const struct vring_virtqueue *vq,
359 if (!vq->use_dma_api)
362 return dma_mapping_error(vring_dma_dev(vq), addr);
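/*
 * Taken together, the helpers above give the add path a uniform
 * mapping pattern, whether or not the DMA API is in use (sketch with
 * hypothetical locals):
 *
 *	dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
 *	if (vring_mapping_error(vq, addr))
 *		goto unmap_release;
 *
 * When use_dma_api is false, the map call degenerates to sg_phys() and
 * the error check never fires.
 */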
367 * Split ring specific functions - *_split().
370 static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
371 struct vring_desc *desc)
375 if (!vq->use_dma_api)
378 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
380 dma_unmap_page(vring_dma_dev(vq),
381 virtio64_to_cpu(vq->vq.vdev, desc->addr),
382 virtio32_to_cpu(vq->vq.vdev, desc->len),
383 (flags & VRING_DESC_F_WRITE) ?
384 DMA_FROM_DEVICE : DMA_TO_DEVICE);
387 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
390 struct vring_desc_extra *extra = vq->split.desc_extra;
393 if (!vq->use_dma_api)
396 flags = extra[i].flags;
398 if (flags & VRING_DESC_F_INDIRECT) {
399 dma_unmap_single(vring_dma_dev(vq),
402 (flags & VRING_DESC_F_WRITE) ?
403 DMA_FROM_DEVICE : DMA_TO_DEVICE);
405 dma_unmap_page(vring_dma_dev(vq),
408 (flags & VRING_DESC_F_WRITE) ?
409 DMA_FROM_DEVICE : DMA_TO_DEVICE);
413 return extra[i].next;
416 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
417 unsigned int total_sg,
420 struct vring_desc *desc;
424 * We require lowmem mappings for the descriptors because
425 * otherwise virt_to_phys will give us bogus addresses in the
428 gfp &= ~__GFP_HIGHMEM;
430 desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
434 for (i = 0; i < total_sg; i++)
435 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
439 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
440 struct vring_desc *desc,
447 struct vring_virtqueue *vring = to_vvq(vq);
448 struct vring_desc_extra *extra = vring->split.desc_extra;
451 desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
452 desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
453 desc[i].len = cpu_to_virtio32(vq->vdev, len);
456 next = extra[i].next;
457 desc[i].next = cpu_to_virtio16(vq->vdev, next);
459 extra[i].addr = addr;
461 extra[i].flags = flags;
463 next = virtio16_to_cpu(vq->vdev, desc[i].next);
468 static inline int virtqueue_add_split(struct virtqueue *_vq,
469 struct scatterlist *sgs[],
470 unsigned int total_sg,
471 unsigned int out_sgs,
477 struct vring_virtqueue *vq = to_vvq(_vq);
478 struct scatterlist *sg;
479 struct vring_desc *desc;
480 unsigned int i, n, avail, descs_used, prev, err_idx;
486 BUG_ON(data == NULL);
487 BUG_ON(ctx && vq->indirect);
489 if (unlikely(vq->broken)) {
494 LAST_ADD_TIME_UPDATE(vq);
496 BUG_ON(total_sg == 0);
498 head = vq->free_head;
500 if (virtqueue_use_indirect(vq, total_sg))
501 desc = alloc_indirect_split(_vq, total_sg, gfp);
504 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
508 /* Use a single buffer which doesn't continue */
510 /* Set up rest to use this indirect table. */
515 desc = vq->split.vring.desc;
517 descs_used = total_sg;
520 if (unlikely(vq->vq.num_free < descs_used)) {
521 pr_debug("Can't add buf len %i - avail = %i\n",
522 descs_used, vq->vq.num_free);
523 /* FIXME: for historical reasons, we force a notify here if
524 * there are outgoing parts to the buffer. Presumably the
525 * host should service the ring ASAP. */
534 for (n = 0; n < out_sgs; n++) {
535 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
536 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
537 if (vring_mapping_error(vq, addr))
541 /* Note that we trust indirect descriptor
542 * table since it uses stream DMA mapping.
544 i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
549 for (; n < (out_sgs + in_sgs); n++) {
550 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
551 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
552 if (vring_mapping_error(vq, addr))
556 /* Note that we trust indirect descriptor
557 * table since it uses stream DMA mapping.
559 i = virtqueue_add_desc_split(_vq, desc, i, addr,
566 /* Last one doesn't continue. */
567 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
568 if (!indirect && vq->use_dma_api)
569 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
573 /* Now that the indirect table is filled in, map it. */
574 dma_addr_t addr = vring_map_single(
575 vq, desc, total_sg * sizeof(struct vring_desc),
577 if (vring_mapping_error(vq, addr))
580 virtqueue_add_desc_split(_vq, vq->split.vring.desc,
582 total_sg * sizeof(struct vring_desc),
583 VRING_DESC_F_INDIRECT,
587 /* We're using some buffers from the free list. */
588 vq->vq.num_free -= descs_used;
590 /* Update free pointer */
592 vq->free_head = vq->split.desc_extra[head].next;
596 /* Store token and indirect buffer state. */
597 vq->split.desc_state[head].data = data;
599 vq->split.desc_state[head].indir_desc = desc;
601 vq->split.desc_state[head].indir_desc = ctx;
603 /* Put entry in available array (but don't update avail->idx until they
605 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
606 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
608 /* Descriptors and available array need to be set before we expose the
609 * new available array entries. */
610 virtio_wmb(vq->weak_barriers);
611 vq->split.avail_idx_shadow++;
612 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
613 vq->split.avail_idx_shadow);
616 pr_debug("Added buffer head %i to %p\n", head, vq);
619 /* This is very unlikely, but theoretically possible. Kick
621 if (unlikely(vq->num_added == (1 << 16) - 1))
634 for (n = 0; n < total_sg; n++) {
638 vring_unmap_one_split_indirect(vq, &desc[i]);
639 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
641 i = vring_unmap_one_split(vq, i);
651 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
653 struct vring_virtqueue *vq = to_vvq(_vq);
658 /* We need to expose available array entries before checking avail
660 virtio_mb(vq->weak_barriers);
662 old = vq->split.avail_idx_shadow - vq->num_added;
663 new = vq->split.avail_idx_shadow;
666 LAST_ADD_TIME_CHECK(vq);
667 LAST_ADD_TIME_INVALID(vq);
670 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
671 vring_avail_event(&vq->split.vring)),
674 needs_kick = !(vq->split.vring.used->flags &
675 cpu_to_virtio16(_vq->vdev,
676 VRING_USED_F_NO_NOTIFY));
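/*
 * Worked example (illustrative): vring_need_event(event_idx, new, old)
 * is true when the device's event index falls in the window (old, new]
 * of entries added since the last kick.  With old = 10 and new = 13, a
 * device-requested event index of 12 means we must notify; an event
 * index of 20 means the device does not want a kick yet.
 */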
682 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
686 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
688 /* Clear data ptr. */
689 vq->split.desc_state[head].data = NULL;
691 /* Put back on free list: unmap first-level descriptors and find end */
694 while (vq->split.vring.desc[i].flags & nextflag) {
695 vring_unmap_one_split(vq, i);
696 i = vq->split.desc_extra[i].next;
700 vring_unmap_one_split(vq, i);
701 vq->split.desc_extra[i].next = vq->free_head;
702 vq->free_head = head;
704 /* Plus final descriptor */
708 struct vring_desc *indir_desc =
709 vq->split.desc_state[head].indir_desc;
712 /* Free the indirect table, if any, now that it's unmapped. */
716 len = vq->split.desc_extra[head].len;
718 BUG_ON(!(vq->split.desc_extra[head].flags &
719 VRING_DESC_F_INDIRECT));
720 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
722 for (j = 0; j < len / sizeof(struct vring_desc); j++)
723 vring_unmap_one_split_indirect(vq, &indir_desc[j]);
726 vq->split.desc_state[head].indir_desc = NULL;
728 *ctx = vq->split.desc_state[head].indir_desc;
732 static inline bool more_used_split(const struct vring_virtqueue *vq)
734 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
735 vq->split.vring.used->idx);
738 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
742 struct vring_virtqueue *vq = to_vvq(_vq);
749 if (unlikely(vq->broken)) {
754 if (!more_used_split(vq)) {
755 pr_debug("No more buffers in queue\n");
760 /* Only get used array entries after they have been exposed by host. */
761 virtio_rmb(vq->weak_barriers);
763 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
764 i = virtio32_to_cpu(_vq->vdev,
765 vq->split.vring.used->ring[last_used].id);
766 *len = virtio32_to_cpu(_vq->vdev,
767 vq->split.vring.used->ring[last_used].len);
769 if (unlikely(i >= vq->split.vring.num)) {
770 BAD_RING(vq, "id %u out of range\n", i);
773 if (unlikely(!vq->split.desc_state[i].data)) {
774 BAD_RING(vq, "id %u is not a head!\n", i);
778 /* detach_buf_split clears data, so grab it now. */
779 ret = vq->split.desc_state[i].data;
780 detach_buf_split(vq, i, ctx);
782 /* If we expect an interrupt for the next entry, tell host
783 * by writing event index and flush out the write before
784 * the read in the next get_buf call. */
785 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
786 virtio_store_mb(vq->weak_barriers,
787 &vring_used_event(&vq->split.vring),
788 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
790 LAST_ADD_TIME_INVALID(vq);
796 static void virtqueue_disable_cb_split(struct virtqueue *_vq)
798 struct vring_virtqueue *vq = to_vvq(_vq);
800 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
801 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
803 /* TODO: this is a hack. Figure out a cleaner value to write. */
804 vring_used_event(&vq->split.vring) = 0x0;
806 vq->split.vring.avail->flags =
807 cpu_to_virtio16(_vq->vdev,
808 vq->split.avail_flags_shadow);
812 static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
814 struct vring_virtqueue *vq = to_vvq(_vq);
819 /* We optimistically turn back on interrupts, then check if there was
821 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
822 * either clear the flags bit or point the event index at the next
823 * entry. Always do both to keep code simple. */
824 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
825 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
827 vq->split.vring.avail->flags =
828 cpu_to_virtio16(_vq->vdev,
829 vq->split.avail_flags_shadow);
831 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
832 last_used_idx = vq->last_used_idx);
834 return last_used_idx;
837 static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
839 struct vring_virtqueue *vq = to_vvq(_vq);
841 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
842 vq->split.vring.used->idx);
845 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
847 struct vring_virtqueue *vq = to_vvq(_vq);
852 /* We optimistically turn back on interrupts, then check if there was
854 * Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
855 * either clear the flags bit or point the event index at the next
856 * entry. Always update the event index to keep code simple. */
857 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
858 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
860 vq->split.vring.avail->flags =
861 cpu_to_virtio16(_vq->vdev,
862 vq->split.avail_flags_shadow);
864 /* TODO: tune this threshold */
865 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
867 virtio_store_mb(vq->weak_barriers,
868 &vring_used_event(&vq->split.vring),
869 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
871 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
872 - vq->last_used_idx) > bufs)) {
881 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
883 struct vring_virtqueue *vq = to_vvq(_vq);
889 for (i = 0; i < vq->split.vring.num; i++) {
890 if (!vq->split.desc_state[i].data)
892 /* detach_buf_split clears data, so grab it now. */
893 buf = vq->split.desc_state[i].data;
894 detach_buf_split(vq, i, NULL);
895 vq->split.avail_idx_shadow--;
896 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
897 vq->split.avail_idx_shadow);
901 /* That should have freed everything. */
902 BUG_ON(vq->vq.num_free != vq->split.vring.num);
908 static struct virtqueue *vring_create_virtqueue_split(
911 unsigned int vring_align,
912 struct virtio_device *vdev,
916 bool (*notify)(struct virtqueue *),
917 void (*callback)(struct virtqueue *),
920 struct virtqueue *vq;
923 size_t queue_size_in_bytes;
926 /* We assume num is a power of 2. */
927 if (num & (num - 1)) {
928 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
932 /* TODO: allocate each queue chunk individually */
933 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
934 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
936 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
947 /* Try to get a single page. You are my only hope! */
948 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
949 &dma_addr, GFP_KERNEL|__GFP_ZERO);
954 queue_size_in_bytes = vring_size(num, vring_align);
955 vring_init(&vring, num, queue, vring_align);
957 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
958 notify, callback, name);
960 vring_free_queue(vdev, queue_size_in_bytes, queue,
965 to_vvq(vq)->split.queue_dma_addr = dma_addr;
966 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
967 to_vvq(vq)->we_own_ring = true;
974 * Packed ring specific functions - *_packed().
977 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
978 struct vring_desc_extra *extra)
982 if (!vq->use_dma_api)
985 flags = extra->flags;
987 if (flags & VRING_DESC_F_INDIRECT) {
988 dma_unmap_single(vring_dma_dev(vq),
989 extra->addr, extra->len,
990 (flags & VRING_DESC_F_WRITE) ?
991 DMA_FROM_DEVICE : DMA_TO_DEVICE);
993 dma_unmap_page(vring_dma_dev(vq),
994 extra->addr, extra->len,
995 (flags & VRING_DESC_F_WRITE) ?
996 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1000 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
1001 struct vring_packed_desc *desc)
1005 if (!vq->use_dma_api)
1008 flags = le16_to_cpu(desc->flags);
1010 dma_unmap_page(vring_dma_dev(vq),
1011 le64_to_cpu(desc->addr),
1012 le32_to_cpu(desc->len),
1013 (flags & VRING_DESC_F_WRITE) ?
1014 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1017 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
1020 struct vring_packed_desc *desc;
1023 * We require lowmem mappings for the descriptors because
1024 * otherwise virt_to_phys will give us bogus addresses in the
1027 gfp &= ~__GFP_HIGHMEM;
1029 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
1034 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
1035 struct scatterlist *sgs[],
1036 unsigned int total_sg,
1037 unsigned int out_sgs,
1038 unsigned int in_sgs,
1042 struct vring_packed_desc *desc;
1043 struct scatterlist *sg;
1044 unsigned int i, n, err_idx;
1048 head = vq->packed.next_avail_idx;
1049 desc = alloc_indirect_packed(total_sg, gfp);
1053 if (unlikely(vq->vq.num_free < 1)) {
1054 pr_debug("Can't add buf len 1 - avail = 0\n");
1062 BUG_ON(id == vq->packed.vring.num);
1064 for (n = 0; n < out_sgs + in_sgs; n++) {
1065 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1066 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1067 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1068 if (vring_mapping_error(vq, addr))
1071 desc[i].flags = cpu_to_le16(n < out_sgs ?
1072 0 : VRING_DESC_F_WRITE);
1073 desc[i].addr = cpu_to_le64(addr);
1074 desc[i].len = cpu_to_le32(sg->length);
1079 /* Now that the indirect table is filled in, map it. */
1080 addr = vring_map_single(vq, desc,
1081 total_sg * sizeof(struct vring_packed_desc),
1083 if (vring_mapping_error(vq, addr))
1086 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1087 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1088 sizeof(struct vring_packed_desc));
1089 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1091 if (vq->use_dma_api) {
1092 vq->packed.desc_extra[id].addr = addr;
1093 vq->packed.desc_extra[id].len = total_sg *
1094 sizeof(struct vring_packed_desc);
1095 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1096 vq->packed.avail_used_flags;
1100 * A driver MUST NOT make the first descriptor in the list
1101 * available before all subsequent descriptors comprising
1102 * the list are made available.
1104 virtio_wmb(vq->weak_barriers);
1105 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1106 vq->packed.avail_used_flags);
1108 /* We're using some buffers from the free list. */
1109 vq->vq.num_free -= 1;
1111 /* Update free pointer */
1113 if (n >= vq->packed.vring.num) {
1115 vq->packed.avail_wrap_counter ^= 1;
1116 vq->packed.avail_used_flags ^=
1117 1 << VRING_PACKED_DESC_F_AVAIL |
1118 1 << VRING_PACKED_DESC_F_USED;
1120 vq->packed.next_avail_idx = n;
1121 vq->free_head = vq->packed.desc_extra[id].next;
1123 /* Store token and indirect buffer state. */
1124 vq->packed.desc_state[id].num = 1;
1125 vq->packed.desc_state[id].data = data;
1126 vq->packed.desc_state[id].indir_desc = desc;
1127 vq->packed.desc_state[id].last = id;
1131 pr_debug("Added buffer head %i to %p\n", head, vq);
1139 for (i = 0; i < err_idx; i++)
1140 vring_unmap_desc_packed(vq, &desc[i]);
1148 static inline int virtqueue_add_packed(struct virtqueue *_vq,
1149 struct scatterlist *sgs[],
1150 unsigned int total_sg,
1151 unsigned int out_sgs,
1152 unsigned int in_sgs,
1157 struct vring_virtqueue *vq = to_vvq(_vq);
1158 struct vring_packed_desc *desc;
1159 struct scatterlist *sg;
1160 unsigned int i, n, c, descs_used, err_idx;
1161 __le16 head_flags, flags;
1162 u16 head, id, prev, curr, avail_used_flags;
1167 BUG_ON(data == NULL);
1168 BUG_ON(ctx && vq->indirect);
1170 if (unlikely(vq->broken)) {
1175 LAST_ADD_TIME_UPDATE(vq);
1177 BUG_ON(total_sg == 0);
1179 if (virtqueue_use_indirect(vq, total_sg)) {
1180 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1182 if (err != -ENOMEM) {
1187 /* fall back on direct */
1190 head = vq->packed.next_avail_idx;
1191 avail_used_flags = vq->packed.avail_used_flags;
1193 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1195 desc = vq->packed.vring.desc;
1197 descs_used = total_sg;
1199 if (unlikely(vq->vq.num_free < descs_used)) {
1200 pr_debug("Can't add buf len %i - avail = %i\n",
1201 descs_used, vq->vq.num_free);
1207 BUG_ON(id == vq->packed.vring.num);
1211 for (n = 0; n < out_sgs + in_sgs; n++) {
1212 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1213 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1214 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1215 if (vring_mapping_error(vq, addr))
1218 flags = cpu_to_le16(vq->packed.avail_used_flags |
1219 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1220 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1224 desc[i].flags = flags;
1226 desc[i].addr = cpu_to_le64(addr);
1227 desc[i].len = cpu_to_le32(sg->length);
1228 desc[i].id = cpu_to_le16(id);
1230 if (unlikely(vq->use_dma_api)) {
1231 vq->packed.desc_extra[curr].addr = addr;
1232 vq->packed.desc_extra[curr].len = sg->length;
1233 vq->packed.desc_extra[curr].flags =
1237 curr = vq->packed.desc_extra[curr].next;
1239 if ((unlikely(++i >= vq->packed.vring.num))) {
1241 vq->packed.avail_used_flags ^=
1242 1 << VRING_PACKED_DESC_F_AVAIL |
1243 1 << VRING_PACKED_DESC_F_USED;
1249 vq->packed.avail_wrap_counter ^= 1;
1251 /* We're using some buffers from the free list. */
1252 vq->vq.num_free -= descs_used;
1254 /* Update free pointer */
1255 vq->packed.next_avail_idx = i;
1256 vq->free_head = curr;
1259 vq->packed.desc_state[id].num = descs_used;
1260 vq->packed.desc_state[id].data = data;
1261 vq->packed.desc_state[id].indir_desc = ctx;
1262 vq->packed.desc_state[id].last = prev;
1265 * A driver MUST NOT make the first descriptor in the list
1266 * available before all subsequent descriptors comprising
1267 * the list are made available.
1269 virtio_wmb(vq->weak_barriers);
1270 vq->packed.vring.desc[head].flags = head_flags;
1271 vq->num_added += descs_used;
1273 pr_debug("Added buffer head %i to %p\n", head, vq);
1281 curr = vq->free_head;
1283 vq->packed.avail_used_flags = avail_used_flags;
1285 for (n = 0; n < total_sg; n++) {
1288 vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
1289 curr = vq->packed.desc_extra[curr].next;
1291 if (i >= vq->packed.vring.num)
1299 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1301 struct vring_virtqueue *vq = to_vvq(_vq);
1302 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
1315 * We need to expose the new flags value before checking notification
1318 virtio_mb(vq->weak_barriers);
1320 old = vq->packed.next_avail_idx - vq->num_added;
1321 new = vq->packed.next_avail_idx;
1324 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1325 flags = le16_to_cpu(snapshot.flags);
1327 LAST_ADD_TIME_CHECK(vq);
1328 LAST_ADD_TIME_INVALID(vq);
1330 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1331 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1335 off_wrap = le16_to_cpu(snapshot.off_wrap);
1337 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1338 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1339 if (wrap_counter != vq->packed.avail_wrap_counter)
1340 event_idx -= vq->packed.vring.num;
1342 needs_kick = vring_need_event(event_idx, new, old);
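/*
 * Worked example (illustrative): off_wrap packs the device's event
 * index in the low 15 bits and its wrap counter in bit
 * VRING_PACKED_EVENT_F_WRAP_CTR.  With a 256-entry ring, off_wrap =
 * 0x8005 decodes to wrap_counter = 1 and event_idx = 5; if that wrap
 * counter differs from our avail_wrap_counter, event_idx is pulled
 * back by the ring size so the vring_need_event() window spans the
 * wrap correctly.
 */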
1348 static void detach_buf_packed(struct vring_virtqueue *vq,
1349 unsigned int id, void **ctx)
1351 struct vring_desc_state_packed *state = NULL;
1352 struct vring_packed_desc *desc;
1353 unsigned int i, curr;
1355 state = &vq->packed.desc_state[id];
1357 /* Clear data ptr. */
1360 vq->packed.desc_extra[state->last].next = vq->free_head;
1362 vq->vq.num_free += state->num;
1364 if (unlikely(vq->use_dma_api)) {
1366 for (i = 0; i < state->num; i++) {
1367 vring_unmap_extra_packed(vq,
1368 &vq->packed.desc_extra[curr]);
1369 curr = vq->packed.desc_extra[curr].next;
1376 /* Free the indirect table, if any, now that it's unmapped. */
1377 desc = state->indir_desc;
1381 if (vq->use_dma_api) {
1382 len = vq->packed.desc_extra[id].len;
1383 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1385 vring_unmap_desc_packed(vq, &desc[i]);
1388 state->indir_desc = NULL;
1390 *ctx = state->indir_desc;
1394 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1395 u16 idx, bool used_wrap_counter)
1400 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1401 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1402 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1404 return avail == used && used == used_wrap_counter;
1407 static inline bool more_used_packed(const struct vring_virtqueue *vq)
1409 return is_used_desc_packed(vq, vq->last_used_idx,
1410 vq->packed.used_wrap_counter);
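/*
 * Example (illustrative): while used_wrap_counter is 1, a descriptor
 * whose flags have both VRING_PACKED_DESC_F_AVAIL and
 * VRING_PACKED_DESC_F_USED set counts as used; after the used index
 * wraps and used_wrap_counter flips to 0, it is descriptors with both
 * bits clear that count as used.
 */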
1413 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1417 struct vring_virtqueue *vq = to_vvq(_vq);
1423 if (unlikely(vq->broken)) {
1428 if (!more_used_packed(vq)) {
1429 pr_debug("No more buffers in queue\n");
1434 /* Only get used elements after they have been exposed by host. */
1435 virtio_rmb(vq->weak_barriers);
1437 last_used = vq->last_used_idx;
1438 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1439 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1441 if (unlikely(id >= vq->packed.vring.num)) {
1442 BAD_RING(vq, "id %u out of range\n", id);
1445 if (unlikely(!vq->packed.desc_state[id].data)) {
1446 BAD_RING(vq, "id %u is not a head!\n", id);
1450 /* detach_buf_packed clears data, so grab it now. */
1451 ret = vq->packed.desc_state[id].data;
1452 detach_buf_packed(vq, id, ctx);
1454 vq->last_used_idx += vq->packed.desc_state[id].num;
1455 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1456 vq->last_used_idx -= vq->packed.vring.num;
1457 vq->packed.used_wrap_counter ^= 1;
1461 * If we expect an interrupt for the next entry, tell host
1462 * by writing event index and flush out the write before
1463 * the read in the next get_buf call.
1465 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1466 virtio_store_mb(vq->weak_barriers,
1467 &vq->packed.vring.driver->off_wrap,
1468 cpu_to_le16(vq->last_used_idx |
1469 (vq->packed.used_wrap_counter <<
1470 VRING_PACKED_EVENT_F_WRAP_CTR)));
1472 LAST_ADD_TIME_INVALID(vq);
1478 static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1480 struct vring_virtqueue *vq = to_vvq(_vq);
1482 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1483 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1484 vq->packed.vring.driver->flags =
1485 cpu_to_le16(vq->packed.event_flags_shadow);
1489 static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1491 struct vring_virtqueue *vq = to_vvq(_vq);
1496 * We optimistically turn back on interrupts, then check if there was
1501 vq->packed.vring.driver->off_wrap =
1502 cpu_to_le16(vq->last_used_idx |
1503 (vq->packed.used_wrap_counter <<
1504 VRING_PACKED_EVENT_F_WRAP_CTR));
1506 * We need to update event offset and event wrap
1507 * counter first before updating event flags.
1509 virtio_wmb(vq->weak_barriers);
1512 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1513 vq->packed.event_flags_shadow = vq->event ?
1514 VRING_PACKED_EVENT_FLAG_DESC :
1515 VRING_PACKED_EVENT_FLAG_ENABLE;
1516 vq->packed.vring.driver->flags =
1517 cpu_to_le16(vq->packed.event_flags_shadow);
1521 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1522 VRING_PACKED_EVENT_F_WRAP_CTR);
1525 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1527 struct vring_virtqueue *vq = to_vvq(_vq);
1531 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1532 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1534 return is_used_desc_packed(vq, used_idx, wrap_counter);
1537 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1539 struct vring_virtqueue *vq = to_vvq(_vq);
1540 u16 used_idx, wrap_counter;
1546 * We optimistically turn back on interrupts, then check if there was
1551 /* TODO: tune this threshold */
1552 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1553 wrap_counter = vq->packed.used_wrap_counter;
1555 used_idx = vq->last_used_idx + bufs;
1556 if (used_idx >= vq->packed.vring.num) {
1557 used_idx -= vq->packed.vring.num;
1561 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1562 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1565 * We need to update event offset and event wrap
1566 * counter first before updating event flags.
1568 virtio_wmb(vq->weak_barriers);
1571 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1572 vq->packed.event_flags_shadow = vq->event ?
1573 VRING_PACKED_EVENT_FLAG_DESC :
1574 VRING_PACKED_EVENT_FLAG_ENABLE;
1575 vq->packed.vring.driver->flags =
1576 cpu_to_le16(vq->packed.event_flags_shadow);
1580 * We need to update event suppression structure first
1581 * before re-checking for more used buffers.
1583 virtio_mb(vq->weak_barriers);
1585 if (is_used_desc_packed(vq,
1587 vq->packed.used_wrap_counter)) {
1596 static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1598 struct vring_virtqueue *vq = to_vvq(_vq);
1604 for (i = 0; i < vq->packed.vring.num; i++) {
1605 if (!vq->packed.desc_state[i].data)
1607 /* detach_buf_packed clears data, so grab it now. */
1608 buf = vq->packed.desc_state[i].data;
1609 detach_buf_packed(vq, i, NULL);
1613 /* That should have freed everything. */
1614 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1620 static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
1623 struct vring_desc_extra *desc_extra;
1626 desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
1631 memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
1633 for (i = 0; i < num - 1; i++)
1634 desc_extra[i].next = i + 1;
1639 static struct virtqueue *vring_create_virtqueue_packed(
1642 unsigned int vring_align,
1643 struct virtio_device *vdev,
1645 bool may_reduce_num,
1647 bool (*notify)(struct virtqueue *),
1648 void (*callback)(struct virtqueue *),
1651 struct vring_virtqueue *vq;
1652 struct vring_packed_desc *ring;
1653 struct vring_packed_desc_event *driver, *device;
1654 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1655 size_t ring_size_in_bytes, event_size_in_bytes;
1657 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1659 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1661 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1665 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1667 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1668 &driver_event_dma_addr,
1669 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1673 device = vring_alloc_queue(vdev, event_size_in_bytes,
1674 &device_event_dma_addr,
1675 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1679 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1683 vq->vq.callback = callback;
1686 vq->vq.num_free = num;
1687 vq->vq.index = index;
1688 vq->we_own_ring = true;
1689 vq->notify = notify;
1690 vq->weak_barriers = weak_barriers;
1692 vq->last_used_idx = 0;
1693 vq->event_triggered = false;
1695 vq->packed_ring = true;
1696 vq->use_dma_api = vring_use_dma_api(vdev);
1699 vq->last_add_time_valid = false;
1702 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1704 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1706 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1707 vq->weak_barriers = false;
1709 vq->packed.ring_dma_addr = ring_dma_addr;
1710 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1711 vq->packed.device_event_dma_addr = device_event_dma_addr;
1713 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1714 vq->packed.event_size_in_bytes = event_size_in_bytes;
1716 vq->packed.vring.num = num;
1717 vq->packed.vring.desc = ring;
1718 vq->packed.vring.driver = driver;
1719 vq->packed.vring.device = device;
1721 vq->packed.next_avail_idx = 0;
1722 vq->packed.avail_wrap_counter = 1;
1723 vq->packed.used_wrap_counter = 1;
1724 vq->packed.event_flags_shadow = 0;
1725 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1727 vq->packed.desc_state = kmalloc_array(num,
1728 sizeof(struct vring_desc_state_packed),
1730 if (!vq->packed.desc_state)
1731 goto err_desc_state;
1733 memset(vq->packed.desc_state, 0,
1734 num * sizeof(struct vring_desc_state_packed));
1736 /* Put everything in free lists. */
1739 vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
1740 if (!vq->packed.desc_extra)
1741 goto err_desc_extra;
1743 /* No callback? Tell other side not to bother us. */
1745 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1746 vq->packed.vring.driver->flags =
1747 cpu_to_le16(vq->packed.event_flags_shadow);
1750 spin_lock(&vdev->vqs_list_lock);
1751 list_add_tail(&vq->vq.list, &vdev->vqs);
1752 spin_unlock(&vdev->vqs_list_lock);
1756 kfree(vq->packed.desc_state);
1760 vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
1762 vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
1764 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1771 * Generic functions and exported symbols.
1774 static inline int virtqueue_add(struct virtqueue *_vq,
1775 struct scatterlist *sgs[],
1776 unsigned int total_sg,
1777 unsigned int out_sgs,
1778 unsigned int in_sgs,
1783 struct vring_virtqueue *vq = to_vvq(_vq);
1785 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1786 out_sgs, in_sgs, data, ctx, gfp) :
1787 virtqueue_add_split(_vq, sgs, total_sg,
1788 out_sgs, in_sgs, data, ctx, gfp);
1792 * virtqueue_add_sgs - expose buffers to other end
1793 * @_vq: the struct virtqueue we're talking about.
1794 * @sgs: array of terminated scatterlists.
1795 * @out_sgs: the number of scatterlists readable by other side
1796 * @in_sgs: the number of scatterlists which are writable (after readable ones)
1797 * @data: the token identifying the buffer.
1798 * @gfp: how to do memory allocations (if necessary).
1800 * Caller must ensure we don't call this with other virtqueue operations
1801 * at the same time (except where noted).
1803 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1805 int virtqueue_add_sgs(struct virtqueue *_vq,
1806 struct scatterlist *sgs[],
1807 unsigned int out_sgs,
1808 unsigned int in_sgs,
1812 unsigned int i, total_sg = 0;
1814 /* Count them first. */
1815 for (i = 0; i < out_sgs + in_sgs; i++) {
1816 struct scatterlist *sg;
1818 for (sg = sgs[i]; sg; sg = sg_next(sg))
1821 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1824 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
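/*
 * Usage sketch (illustrative, hypothetical driver structures): queueing
 * a request with a device-readable header and a device-writable status
 * byte, virtio-blk style:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 */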
1827 * virtqueue_add_outbuf - expose output buffers to other end
1828 * @vq: the struct virtqueue we're talking about.
1829 * @sg: scatterlist (must be well-formed and terminated!)
1830 * @num: the number of entries in @sg readable by other side
1831 * @data: the token identifying the buffer.
1832 * @gfp: how to do memory allocations (if necessary).
1834 * Caller must ensure we don't call this with other virtqueue operations
1835 * at the same time (except where noted).
1837 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1839 int virtqueue_add_outbuf(struct virtqueue *vq,
1840 struct scatterlist *sg, unsigned int num,
1844 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1846 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
1849 * virtqueue_add_inbuf - expose input buffers to other end
1850 * @vq: the struct virtqueue we're talking about.
1851 * @sg: scatterlist (must be well-formed and terminated!)
1852 * @num: the number of entries in @sg writable by other side
1853 * @data: the token identifying the buffer.
1854 * @gfp: how to do memory allocations (if necessary).
1856 * Caller must ensure we don't call this with other virtqueue operations
1857 * at the same time (except where noted).
1859 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1861 int virtqueue_add_inbuf(struct virtqueue *vq,
1862 struct scatterlist *sg, unsigned int num,
1866 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1868 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
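/*
 * Usage sketch (illustrative): refilling a receive queue with empty
 * buffers for the device to write into; my_alloc_rx_buf(),
 * my_free_rx_buf() and MY_RX_BUF_LEN are hypothetical.
 *
 *	struct scatterlist sg;
 *	void *buf;
 *
 *	while ((buf = my_alloc_rx_buf(GFP_KERNEL)) != NULL) {
 *		sg_init_one(&sg, buf, MY_RX_BUF_LEN);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0) {
 *			my_free_rx_buf(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */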
1871 * virtqueue_add_inbuf_ctx - expose input buffers to other end
1872 * @vq: the struct virtqueue we're talking about.
1873 * @sg: scatterlist (must be well-formed and terminated!)
1874 * @num: the number of entries in @sg writable by other side
1875 * @data: the token identifying the buffer.
1876 * @ctx: extra context for the token
1877 * @gfp: how to do memory allocations (if necessary).
1879 * Caller must ensure we don't call this with other virtqueue operations
1880 * at the same time (except where noted).
1882 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1884 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1885 struct scatterlist *sg, unsigned int num,
1890 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1892 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1895 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1896 * @_vq: the struct virtqueue
1898 * Instead of virtqueue_kick(), you can do:
1899 * if (virtqueue_kick_prepare(vq))
1900 * virtqueue_notify(vq);
1902 * This is sometimes useful because the virtqueue_kick_prepare() needs
1903 * to be serialized, but the actual virtqueue_notify() call does not.
1905 bool virtqueue_kick_prepare(struct virtqueue *_vq)
1907 struct vring_virtqueue *vq = to_vvq(_vq);
1909 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1910 virtqueue_kick_prepare_split(_vq);
1912 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
1915 * virtqueue_notify - second half of split virtqueue_kick call.
1916 * @_vq: the struct virtqueue
1918 * This does not need to be serialized.
1920 * Returns false if host notify failed or queue is broken, otherwise true.
1922 bool virtqueue_notify(struct virtqueue *_vq)
1924 struct vring_virtqueue *vq = to_vvq(_vq);
1926 if (unlikely(vq->broken))
1929 /* Prod other side to tell it about changes. */
1930 if (!vq->notify(_vq)) {
1936 EXPORT_SYMBOL_GPL(virtqueue_notify);
1939 * virtqueue_kick - update after add_buf
1940 * @vq: the struct virtqueue
1942 * After one or more virtqueue_add_* calls, invoke this to kick
1945 * Caller must ensure we don't call this with other virtqueue
1946 * operations at the same time (except where noted).
1948 * Returns false if kick failed, otherwise true.
1950 bool virtqueue_kick(struct virtqueue *vq)
1952 if (virtqueue_kick_prepare(vq))
1953 return virtqueue_notify(vq);
1956 EXPORT_SYMBOL_GPL(virtqueue_kick);
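/*
 * Sketch (illustrative): the usual transmit pattern batches several
 * adds behind a single kick; my_next_request() is hypothetical.
 *
 *	while ((req = my_next_request()) != NULL)
 *		if (virtqueue_add_outbuf(vq, req->sg, req->nents, req,
 *					 GFP_ATOMIC) < 0)
 *			break;
 *	virtqueue_kick(vq);
 */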
1959 * virtqueue_get_buf_ctx - get the next used buffer
1960 * @_vq: the struct virtqueue we're talking about.
1961 * @len: the length written into the buffer
1962 * @ctx: extra context for the token
1964 * If the device wrote data into the buffer, @len will be set to the
1965 * amount written. This means you don't need to clear the buffer
1966 * beforehand to ensure there's no data leakage in the case of short
1969 * Caller must ensure we don't call this with other virtqueue
1970 * operations at the same time (except where noted).
1972 * Returns NULL if there are no used buffers, or the "data" token
1973 * handed to virtqueue_add_*().
1975 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1978 struct vring_virtqueue *vq = to_vvq(_vq);
1980 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
1981 virtqueue_get_buf_ctx_split(_vq, len, ctx);
1983 EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1985 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1987 return virtqueue_get_buf_ctx(_vq, len, NULL);
1989 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
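/*
 * Usage sketch (illustrative): draining completions, typically from the
 * virtqueue callback; my_complete_request() is hypothetical.
 *
 *	unsigned int len;
 *	void *req;
 *
 *	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
 *		my_complete_request(req, len);
 */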
1991 * virtqueue_disable_cb - disable callbacks
1992 * @_vq: the struct virtqueue we're talking about.
1994 * Note that this is not necessarily synchronous, hence unreliable and only
1995 * useful as an optimization.
1997 * Unlike other operations, this need not be serialized.
1999 void virtqueue_disable_cb(struct virtqueue *_vq)
2001 struct vring_virtqueue *vq = to_vvq(_vq);
2003 /* If device triggered an event already, it won't trigger one again:
2004 * no need to disable.
2006 if (vq->event_triggered)
2009 if (vq->packed_ring)
2010 virtqueue_disable_cb_packed(_vq);
2012 virtqueue_disable_cb_split(_vq);
2014 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2017 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2018 * @_vq: the struct virtqueue we're talking about.
2020 * This re-enables callbacks; it returns current queue state
2021 * in an opaque unsigned value. This value should be later tested by
2022 * virtqueue_poll, to detect a possible race between the driver checking for
2023 * more work, and enabling callbacks.
2025 * Caller must ensure we don't call this with other virtqueue
2026 * operations at the same time (except where noted).
2028 unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2030 struct vring_virtqueue *vq = to_vvq(_vq);
2032 if (vq->event_triggered)
2033 vq->event_triggered = false;
2035 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
2036 virtqueue_enable_cb_prepare_split(_vq);
2038 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2041 * virtqueue_poll - query pending used buffers
2042 * @_vq: the struct virtqueue we're talking about.
2043 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2045 * Returns "true" if there are pending used buffers in the queue.
2047 * This does not need to be serialized.
2049 bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
2051 struct vring_virtqueue *vq = to_vvq(_vq);
2053 if (unlikely(vq->broken))
2056 virtio_mb(vq->weak_barriers);
2057 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
2058 virtqueue_poll_split(_vq, last_used_idx);
2060 EXPORT_SYMBOL_GPL(virtqueue_poll);
2063 * virtqueue_enable_cb - restart callbacks after disable_cb.
2064 * @_vq: the struct virtqueue we're talking about.
2066 * This re-enables callbacks; it returns "false" if there are pending
2067 * buffers in the queue, to detect a possible race between the driver
2068 * checking for more work, and enabling callbacks.
2070 * Caller must ensure we don't call this with other virtqueue
2071 * operations at the same time (except where noted).
2073 bool virtqueue_enable_cb(struct virtqueue *_vq)
2075 unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
2077 return !virtqueue_poll(_vq, last_used_idx);
2079 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
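/*
 * Sketch (illustrative) of the usual race-free drain loop built from
 * virtqueue_disable_cb()/virtqueue_enable_cb(); my_process() is
 * hypothetical.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			my_process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */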
2082 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2083 * @_vq: the struct virtqueue we're talking about.
2085 * This re-enables callbacks but hints to the other side to delay
2086 * interrupts until most of the available buffers have been processed;
2087 * it returns "false" if there are many pending buffers in the queue,
2088 * to detect a possible race between the driver checking for more work,
2089 * and enabling callbacks.
2091 * Caller must ensure we don't call this with other virtqueue
2092 * operations at the same time (except where noted).
2094 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2096 struct vring_virtqueue *vq = to_vvq(_vq);
2098 if (vq->event_triggered)
2099 vq->event_triggered = false;
2101 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
2102 virtqueue_enable_cb_delayed_split(_vq);
2104 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2107 * virtqueue_detach_unused_buf - detach first unused buffer
2108 * @_vq: the struct virtqueue we're talking about.
2110 * Returns NULL or the "data" token handed to virtqueue_add_*().
2111 * This is not valid on an active queue; it is useful only for device
2114 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2116 struct vring_virtqueue *vq = to_vvq(_vq);
2118 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
2119 virtqueue_detach_unused_buf_split(_vq);
2121 EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
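/*
 * Usage sketch (illustrative): on device teardown, after the queue has
 * been stopped, reclaim buffers the device never consumed; my_free_buf()
 * is hypothetical.
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		my_free_buf(buf);
 */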
2123 static inline bool more_used(const struct vring_virtqueue *vq)
2125 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2128 irqreturn_t vring_interrupt(int irq, void *_vq)
2130 struct vring_virtqueue *vq = to_vvq(_vq);
2132 if (!more_used(vq)) {
2133 pr_debug("virtqueue interrupt with no work for %p\n", vq);
2137 if (unlikely(vq->broken)) {
2138 dev_warn_once(&vq->vq.vdev->dev,
2139 "virtio vring IRQ raised before DRIVER_OK");
2143 /* Just a hint for performance: so it's ok that this can be racy! */
2145 vq->event_triggered = true;
2147 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
2148 if (vq->vq.callback)
2149 vq->vq.callback(&vq->vq);
2153 EXPORT_SYMBOL_GPL(vring_interrupt);
2155 /* Only available for split ring */
2156 struct virtqueue *__vring_new_virtqueue(unsigned int index,
2158 struct virtio_device *vdev,
2161 bool (*notify)(struct virtqueue *),
2162 void (*callback)(struct virtqueue *),
2165 struct vring_virtqueue *vq;
2167 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2170 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
2174 vq->packed_ring = false;
2175 vq->vq.callback = callback;
2178 vq->vq.num_free = vring.num;
2179 vq->vq.index = index;
2180 vq->we_own_ring = false;
2181 vq->notify = notify;
2182 vq->weak_barriers = weak_barriers;
2184 vq->last_used_idx = 0;
2185 vq->event_triggered = false;
2187 vq->use_dma_api = vring_use_dma_api(vdev);
2190 vq->last_add_time_valid = false;
2193 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2195 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2197 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2198 vq->weak_barriers = false;
2200 vq->split.queue_dma_addr = 0;
2201 vq->split.queue_size_in_bytes = 0;
2203 vq->split.vring = vring;
2204 vq->split.avail_flags_shadow = 0;
2205 vq->split.avail_idx_shadow = 0;
2207 /* No callback? Tell other side not to bother us. */
2209 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
2211 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2212 vq->split.avail_flags_shadow);
2215 vq->split.desc_state = kmalloc_array(vring.num,
2216 sizeof(struct vring_desc_state_split), GFP_KERNEL);
2217 if (!vq->split.desc_state)
2220 vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
2221 if (!vq->split.desc_extra)
2224 /* Put everything in free lists. */
2226 memset(vq->split.desc_state, 0, vring.num *
2227 sizeof(struct vring_desc_state_split));
2229 spin_lock(&vdev->vqs_list_lock);
2230 list_add_tail(&vq->vq.list, &vdev->vqs);
2231 spin_unlock(&vdev->vqs_list_lock);
2235 kfree(vq->split.desc_state);
2240 EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
2242 struct virtqueue *vring_create_virtqueue(
2245 unsigned int vring_align,
2246 struct virtio_device *vdev,
2248 bool may_reduce_num,
2250 bool (*notify)(struct virtqueue *),
2251 void (*callback)(struct virtqueue *),
2255 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2256 return vring_create_virtqueue_packed(index, num, vring_align,
2257 vdev, weak_barriers, may_reduce_num,
2258 context, notify, callback, name);
2260 return vring_create_virtqueue_split(index, num, vring_align,
2261 vdev, weak_barriers, may_reduce_num,
2262 context, notify, callback, name);
2264 EXPORT_SYMBOL_GPL(vring_create_virtqueue);
2266 /* Only available for split ring */
2267 struct virtqueue *vring_new_virtqueue(unsigned int index,
2269 unsigned int vring_align,
2270 struct virtio_device *vdev,
2274 bool (*notify)(struct virtqueue *vq),
2275 void (*callback)(struct virtqueue *vq),
2280 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2283 vring_init(&vring, num, pages, vring_align);
2284 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
2285 notify, callback, name);
2287 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
2289 void vring_del_virtqueue(struct virtqueue *_vq)
2291 struct vring_virtqueue *vq = to_vvq(_vq);
2293 spin_lock(&vq->vq.vdev->vqs_list_lock);
2294 list_del(&_vq->list);
2295 spin_unlock(&vq->vq.vdev->vqs_list_lock);
2297 if (vq->we_own_ring) {
2298 if (vq->packed_ring) {
2299 vring_free_queue(vq->vq.vdev,
2300 vq->packed.ring_size_in_bytes,
2301 vq->packed.vring.desc,
2302 vq->packed.ring_dma_addr);
2304 vring_free_queue(vq->vq.vdev,
2305 vq->packed.event_size_in_bytes,
2306 vq->packed.vring.driver,
2307 vq->packed.driver_event_dma_addr);
2309 vring_free_queue(vq->vq.vdev,
2310 vq->packed.event_size_in_bytes,
2311 vq->packed.vring.device,
2312 vq->packed.device_event_dma_addr);
2314 kfree(vq->packed.desc_state);
2315 kfree(vq->packed.desc_extra);
2317 vring_free_queue(vq->vq.vdev,
2318 vq->split.queue_size_in_bytes,
2319 vq->split.vring.desc,
2320 vq->split.queue_dma_addr);
2323 if (!vq->packed_ring) {
2324 kfree(vq->split.desc_state);
2325 kfree(vq->split.desc_extra);
2329 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
2331 /* Manipulates transport-specific feature bits. */
2332 void vring_transport_features(struct virtio_device *vdev)
2336 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2338 case VIRTIO_RING_F_INDIRECT_DESC:
2340 case VIRTIO_RING_F_EVENT_IDX:
2342 case VIRTIO_F_VERSION_1:
2344 case VIRTIO_F_ACCESS_PLATFORM:
2346 case VIRTIO_F_RING_PACKED:
2348 case VIRTIO_F_ORDER_PLATFORM:
2351 /* We don't understand this bit. */
2352 __virtio_clear_bit(vdev, i);
2356 EXPORT_SYMBOL_GPL(vring_transport_features);
2359 * virtqueue_get_vring_size - return the size of the virtqueue's vring
2360 * @_vq: the struct virtqueue containing the vring of interest.
2362 * Returns the size of the vring. This is mainly used for boasting to
2363 * userspace. Unlike other operations, this need not be serialized.
2365 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2368 struct vring_virtqueue *vq = to_vvq(_vq);
2370 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
2372 EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
2374 bool virtqueue_is_broken(struct virtqueue *_vq)
2376 struct vring_virtqueue *vq = to_vvq(_vq);
2378 return READ_ONCE(vq->broken);
2380 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2383 * This should prevent the device from being used, allowing drivers to
2384 * recover. You may need to grab appropriate locks to flush.
2386 void virtio_break_device(struct virtio_device *dev)
2388 struct virtqueue *_vq;
2390 spin_lock(&dev->vqs_list_lock);
2391 list_for_each_entry(_vq, &dev->vqs, list) {
2392 struct vring_virtqueue *vq = to_vvq(_vq);
2394 /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2395 WRITE_ONCE(vq->broken, true);
2397 spin_unlock(&dev->vqs_list_lock);
2399 EXPORT_SYMBOL_GPL(virtio_break_device);
2402 * This should allow the device to be used by the driver. You may
2403 * need to grab appropriate locks to flush the write to
2404 * vq->broken. This should only be used in some specific cases, e.g.
2405 * probing and restoring. This function should only be called by the
2406 * core, not directly by the driver.
2408 void __virtio_unbreak_device(struct virtio_device *dev)
2410 struct virtqueue *_vq;
2412 spin_lock(&dev->vqs_list_lock);
2413 list_for_each_entry(_vq, &dev->vqs, list) {
2414 struct vring_virtqueue *vq = to_vvq(_vq);
2416 /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2417 WRITE_ONCE(vq->broken, false);
2419 spin_unlock(&dev->vqs_list_lock);
2421 EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
2423 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
2425 struct vring_virtqueue *vq = to_vvq(_vq);
2427 BUG_ON(!vq->we_own_ring);
2429 if (vq->packed_ring)
2430 return vq->packed.ring_dma_addr;
2432 return vq->split.queue_dma_addr;
2434 EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
2436 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
2438 struct vring_virtqueue *vq = to_vvq(_vq);
2440 BUG_ON(!vq->we_own_ring);
2442 if (vq->packed_ring)
2443 return vq->packed.driver_event_dma_addr;
2445 return vq->split.queue_dma_addr +
2446 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
2448 EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
2450 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2452 struct vring_virtqueue *vq = to_vvq(_vq);
2454 BUG_ON(!vq->we_own_ring);
2456 if (vq->packed_ring)
2457 return vq->packed.device_event_dma_addr;
2459 return vq->split.queue_dma_addr +
2460 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
2462 EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
2464 /* Only available for split ring */
2465 const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2467 return &to_vvq(vq)->split.vring;
2469 EXPORT_SYMBOL_GPL(virtqueue_get_vring);
2471 MODULE_LICENSE("GPL");