// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		/* DMA mapping is done by the parent; see vring_dma_dev(). */
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
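
/*
 * Illustrative sketch (not part of the original file): how a driver
 * might clamp its per-segment buffer size to what the DMA layer can
 * actually map.  The 4 MiB cap and function name are hypothetical;
 * virtio-blk, for example, feeds the result of virtio_max_dma_size()
 * into blk_queue_max_segment_size().
 */
static size_t example_segment_limit(struct virtio_device *vdev)
{
	size_t driver_max = 4 * 1024 * 1024;	/* hypothetical driver cap */

	/* Never exceed what the DMA API can map in one segment. */
	return min(driver_max, virtio_max_dma_size(vdev));
}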

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						       vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}

/*
 * Packed ring specific functions - *_packed().
 */

static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra *state)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = state->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 state->addr, state->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       state->addr, state->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 le64_to_cpu(desc->addr),
				 le32_to_cpu(desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       le64_to_cpu(desc->addr),
			       le32_to_cpu(desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}

static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_extra[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(_vq, total_sg))
		return virtqueue_add_indirect_packed(vq, sgs, total_sg,
				out_sgs, in_sgs, data, gfp);

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
			curr = vq->packed.desc_extra[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				i = 0;
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;
			}
		}
	}

	if (i < head)
		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	/* Store token. */
	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_desc_packed(vq, &desc[i]);
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;
	vq->num_added = 0;

	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
		goto out;
	}

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
out:
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */
	state->data = NULL;

	vq->packed.desc_extra[state->last].next = vq->free_head;
	vq->free_head = id;
	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_state_packed(vq,
				&vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_extra[curr].next;
		}
	}

	if (vq->indirect) {
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;
		if (!desc)
			return;

		if (vq->use_dma_api) {
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
					i++)
				vring_unmap_desc_packed(vq, &desc[i]);
		}
		kfree(desc);
		state->indir_desc = NULL;
	} else if (ctx) {
		*ctx = state->indir_desc;
	}
}

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	bool avail, used;
	u16 flags;

	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
	return is_used_desc_packed(vq, vq->last_used_idx,
			vq->packed.used_wrap_counter);
}

static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  unsigned int *len,
					  void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id;
	void *ret;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_packed(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = vq->last_used_idx;
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
		return NULL;
	}
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);
		return NULL;
	}

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	vq->last_used_idx += vq->packed.desc_state[id].num;
	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
		vq->last_used_idx -= vq->packed.vring.num;
		vq->packed.used_wrap_counter ^= 1;
	}

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx |
					(vq->packed.used_wrap_counter <<
					 VRING_PACKED_EVENT_F_WRAP_CTR)));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx |
				(vq->packed.used_wrap_counter <<
				 VRING_PACKED_EVENT_F_WRAP_CTR));
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	END_USE(vq);
	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
			VRING_PACKED_EVENT_F_WRAP_CTR);
}

static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	bool wrap_counter;
	u16 used_idx;

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter;
	u16 bufs;

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		wrap_counter = vq->packed.used_wrap_counter;

		used_idx = vq->last_used_idx + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;
			wrap_counter ^= 1;
		}

		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	/*
	 * We need to update event suppression structure first
	 * before re-checking for more used buffers.
	 */
	virtio_mb(vq->weak_barriers);

	if (is_used_desc_packed(vq,
				vq->last_used_idx,
				vq->packed.used_wrap_counter)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->packed.vring.num; i++) {
		if (!vq->packed.desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->packed.desc_state[i].data;
		detach_buf_packed(vq, i, NULL);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->packed.vring.num);

	END_USE(vq);
	return NULL;
}

static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
						       unsigned int num)
{
	struct vring_desc_extra *desc_extra;
	unsigned int i;

	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
				   GFP_KERNEL);
	if (!desc_extra)
		return NULL;

	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));

	for (i = 0; i < num - 1; i++)
		desc_extra[i].next = i + 1;

	return desc_extra;
}

static struct virtqueue *vring_create_virtqueue_packed(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct vring_virtqueue *vq;
	struct vring_packed_desc *ring;
	struct vring_packed_desc_event *driver, *device;
	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
	size_t ring_size_in_bytes, event_size_in_bytes;

	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);

	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
				 &ring_dma_addr,
				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!ring)
		goto err_ring;

	event_size_in_bytes = sizeof(struct vring_packed_desc_event);

	driver = vring_alloc_queue(vdev, event_size_in_bytes,
				   &driver_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!driver)
		goto err_driver;

	device = vring_alloc_queue(vdev, event_size_in_bytes,
				   &device_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!device)
		goto err_device;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		goto err_vq;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->we_own_ring = true;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->event_triggered = false;
	vq->num_added = 0;
	vq->packed_ring = true;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->packed.ring_dma_addr = ring_dma_addr;
	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
	vq->packed.device_event_dma_addr = device_event_dma_addr;

	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
	vq->packed.event_size_in_bytes = event_size_in_bytes;

	vq->packed.vring.num = num;
	vq->packed.vring.desc = ring;
	vq->packed.vring.driver = driver;
	vq->packed.vring.device = device;

	vq->packed.next_avail_idx = 0;
	vq->packed.avail_wrap_counter = 1;
	vq->packed.used_wrap_counter = 1;
	vq->packed.event_flags_shadow = 0;
	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

	vq->packed.desc_state = kmalloc_array(num,
			sizeof(struct vring_desc_state_packed),
			GFP_KERNEL);
	if (!vq->packed.desc_state)
		goto err_desc_state;

	memset(vq->packed.desc_state, 0,
		num * sizeof(struct vring_desc_state_packed));

	/* Put everything in free lists. */
	vq->free_head = 0;

	vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
	if (!vq->packed.desc_extra)
		goto err_desc_extra;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}

	list_add_tail(&vq->vq.list, &vdev->vqs);
	return &vq->vq;

err_desc_extra:
	kfree(vq->packed.desc_state);
err_desc_state:
	kfree(vq);
err_vq:
	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
	return NULL;
}

/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp) :
				 virtqueue_add_split(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
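
/*
 * Illustrative sketch (not part of the original file): queueing one
 * device-readable header and one device-writable status byte with
 * virtqueue_add_sgs(), in the style of virtio_blk/virtio_scsi.  The
 * "example_hdr" layout is hypothetical; the buffers must outlive this
 * call, since the device accesses them asynchronously.
 */
struct example_hdr {
	__virtio32 type;
	__virtio32 ioprio;
};

static int example_add_request(struct virtqueue *vq, struct example_hdr *hdr,
			       u8 *status)
{
	struct scatterlist hdr_sg, status_sg, *sgs[2];

	sg_init_one(&hdr_sg, hdr, sizeof(*hdr));
	sgs[0] = &hdr_sg;		/* out: readable by the device */
	sg_init_one(&status_sg, status, sizeof(*status));
	sgs[1] = &status_sg;		/* in: writable by the device */

	/* hdr doubles as the token later returned by virtqueue_get_buf(). */
	return virtqueue_add_sgs(vq, sgs, 1, 1, hdr, GFP_ATOMIC);
}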

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
				 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
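
/*
 * Illustrative sketch (not part of the original file): splitting the
 * kick so only the serialized half runs under the driver's virtqueue
 * lock, while the (possibly slow) notification runs lock-free.  The
 * "example_priv" structure is hypothetical.
 */
struct example_priv {
	spinlock_t lock;	/* serializes virtqueue operations */
	struct virtqueue *vq;
};

static void example_kick(struct example_priv *p)
{
	bool kick;

	spin_lock_irq(&p->lock);
	kick = virtqueue_kick_prepare(p->vq);	/* must be serialized */
	spin_unlock_irq(&p->lock);

	if (kick)
		virtqueue_notify(p->vq);	/* needs no serialization */
}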

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* If device triggered an event already it won't trigger one again:
	 * no need to disable.
	 */
	if (vq->event_triggered)
		return;

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
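
/*
 * Illustrative sketch (not part of the original file): the usual
 * completion-handling pattern built from these primitives.  Callbacks
 * are disabled while draining, then re-enabled; if a buffer raced in
 * meanwhile, virtqueue_enable_cb() returns false and we drain again.
 */
static void example_vq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			pr_debug("completed buffer %p, %u bytes\n", buf, len);
	} while (!virtqueue_enable_cb(vq));
}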

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	/* Just a hint for performance: so it's ok that this can be racy! */
	if (vq->event)
		vq->event_triggered = true;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->event_triggered = false;
	vq->num_added = 0;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->split.queue_dma_addr = 0;
	vq->split.queue_size_in_bytes = 0;

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state) {
		kfree(vq);
		return NULL;
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	list_add_tail(&vq->vq.list, &vdev->vqs);
	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
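
/*
 * Illustrative sketch (not part of the original file): how a transport
 * might create queue "index", letting this helper pick split or packed
 * based on the negotiated features.  The queue size, PAGE_SIZE
 * alignment, and name here are hypothetical.
 */
static struct virtqueue *example_setup_vq(struct virtio_device *vdev,
					  unsigned int index,
					  bool (*notify)(struct virtqueue *),
					  void (*cb)(struct virtqueue *))
{
	/* 128 entries; allow halving the size if allocation fails. */
	return vring_create_virtqueue(index, 128, PAGE_SIZE, vdev,
				      true /* weak_barriers */,
				      true /* may_reduce_num */,
				      false /* context */,
				      notify, cb, "example-vq");
}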

/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr);

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr);
		}
	}
	if (!vq->packed_ring)
		kfree(vq->split.desc_state);
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_ACCESS_PLATFORM:
			break;
		case VIRTIO_F_RING_PACKED:
			break;
		case VIRTIO_F_ORDER_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");