// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *	virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *	<socket>	:= vhost-user socket path to connect
 *	<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *	<platform_id>	:= (optional) platform device id
 *
 * example:
 *	virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
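/*
 * For instance, a (hypothetical) invocation with a vhost-user network
 * backend already listening on a socket might look like:
 *
 *	./linux virtio_uml.device=/tmp/vhost-net.sock:1 ...
 *
 * which creates virtio device id 1 (a net device, per virtio_ids.h)
 * connected to that socket.
 */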
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)
struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			/* the fds go out with the first fragment only */
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;

	return 0;
}

static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}
static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc == -ECONNRESET && vu_dev->registered) {
		struct virtio_uml_platform_data *pdata;

		pdata = vu_dev->pdev->dev.platform_data;

		virtio_break_device(&vu_dev->vdev);
		schedule_work(&pdata->conn_broken_wk);
	}
	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc)
		return rc;

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;

	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;

	*value = msg.payload.integer;

	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}
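/*
 * Send a message on the main vhost-user socket, under the socket lock.
 * When the slave negotiated VHOST_USER_PROTOCOL_F_REPLY_ACK and the
 * request doesn't already expect a response, an acknowledgment is
 * requested and consumed here so that errors are noticed.
 */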
static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}
static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}

static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;

	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
			VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;

	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}

static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);

	if (rc)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}
static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));

	if (rc)
		return IRQ_NONE;

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		vu_dev->config_changed_irq = true;
		response = 0;
		break;
	case VHOST_USER_SLAVE_VRING_CALL:
		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vq->index == msg.msg.payload.vring_state.index) {
				response = 0;
				vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
				break;
			}
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);
	}

	if (ev && !vu_dev->suspended)
		time_travel_add_irq_event(ev);

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);

	return IRQ_HANDLED;
}

static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}

static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}
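/*
 * Set up the slave request channel: a pipe (SIGIO doesn't work with
 * eventfds) whose read end feeds an IRQ, and whose write end is passed
 * to the slave via VHOST_USER_SET_SLAVE_REQ_FD.
 */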
static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}
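/*
 * Initial handshake: become the owner, fetch the feature bits, then
 * negotiate protocol features and the slave request channel when the
 * slave offers them.
 */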
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}
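/*
 * Config space accessors. Both are silent no-ops unless
 * VHOST_USER_PROTOCOL_F_CONFIG was negotiated; the message payload
 * carries the offset, the size and the raw config bytes.
 */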
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}

	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}
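/*
 * Describe one physical memory region to the slave, backed by the fd
 * that phys_mapping() reports for it; the same address is advertised
 * as both the guest and the user address since UML runs in a single
 * address space.
 */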
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;

	return 0;
}
static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);
	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
				&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}
static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}

/* Virtio interface */
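/*
 * Guest->host queue notification. If in-band notifications were
 * negotiated there is no kick eventfd and a VHOST_USER_VRING_KICK
 * message is sent on the socket instead; otherwise the kick eventfd
 * is written.
 */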
static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (!info->suspended)
		time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}

static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}
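/*
 * Create one virtqueue and program it into the slave: allocate the
 * ring, set up the kick and call fds as negotiated, then send the
 * ring size, base and addresses.
 */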
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}
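/*
 * The memory table must be sent before any vring addresses, and the
 * kick fds are only handed over (and the rings enabled) once all
 * queues exist.
 */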
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}

static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};

static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%sabled VQ suspend\n",
		 no_vq_suspend ? "dis" : "en");
}

/* Platform device */
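/*
 * Probe connects to the vhost-user socket given in the platform data,
 * performs the vhost-user handshake and registers the resulting
 * virtio device.
 */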
static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata)
		return -EINVAL;

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc)
		put_device(&vu_dev->vdev.dev);
	vu_dev->registered = 1;
	return rc;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}

static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
	return 0;
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}
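/*
 * Parse one virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * parameter and register a matching platform device beneath the
 * virtio-uml-cmdline parent device.
 */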
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);

static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);
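/*
 * Unless a driver opted out via virtio_uml_set_no_vq_suspend(), the
 * vrings are disabled across suspend and re-enabled on resume; the
 * request fd IRQ is armed as a wakeup source when the device may wake
 * the system.
 */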
static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");