/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>
#include <linux/bits.h>
/* Register offsets. */
#define VMCI_STATUS_ADDR      0x00
#define VMCI_CONTROL_ADDR     0x04
#define VMCI_ICR_ADDR         0x08
#define VMCI_IMR_ADDR         0x0c
#define VMCI_DATA_OUT_ADDR    0x10
#define VMCI_DATA_IN_ADDR     0x14
#define VMCI_CAPS_ADDR        0x18
#define VMCI_RESULT_LOW_ADDR  0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20
/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON     BIT(0)

/* Control register bits. */
#define VMCI_CONTROL_RESET        BIT(0)
#define VMCI_CONTROL_INT_ENABLE   BIT(1)
#define VMCI_CONTROL_INT_DISABLE  BIT(2)

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL     BIT(0)
#define VMCI_CAPS_GUESTCALL     BIT(1)
#define VMCI_CAPS_DATAGRAM      BIT(2)
#define VMCI_CAPS_NOTIFICATIONS BIT(3)
#define VMCI_CAPS_PPN64         BIT(4)

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM      BIT(0)
#define VMCI_ICR_NOTIFICATION  BIT(1)

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM      BIT(0)
#define VMCI_IMR_NOTIFICATION  BIT(1)

/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2
/*
 * Supported interrupt vectors.  There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
	VMCI_INTR_DATAGRAM = 0,
	VMCI_INTR_NOTIFICATION = 1,
};
/*
 * A single VMCI device has an upper limit of 128MB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)

/*
 * Queues with pre-mapped data pages must be small, so that we don't pin
 * too much kernel memory (especially on vmkernel).  We limit a queuepair to
 * 32 KB, or 16 KB per queue for symmetrical pairs.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles.  If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 *
 * VMCI reserved hypervisor datagram resource IDs.
 */
enum {
	VMCI_RESOURCES_QUERY = 0,
	VMCI_GET_CONTEXT_ID = 1,
	VMCI_SET_NOTIFY_BITMAP = 2,
	VMCI_DOORBELL_LINK = 3,
	VMCI_DOORBELL_UNLINK = 4,
	VMCI_DOORBELL_NOTIFY = 5,

	/*
	 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
	 * obsoleted by the removal of VM to VM communication.
	 */
	VMCI_DATAGRAM_REQUEST_MAP = 6,
	VMCI_DATAGRAM_REMOVE_MAP = 7,
	VMCI_EVENT_SUBSCRIBE = 8,
	VMCI_EVENT_UNSUBSCRIBE = 9,
	VMCI_QUEUEPAIR_ALLOC = 10,
	VMCI_QUEUEPAIR_DETACH = 11,

	/*
	 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
	 * WS 7.0/7.1 and ESX 4.1
	 */
	VMCI_HGFS_TRANSPORT = 13,
	VMCI_UNITY_PBRPC_REGISTER = 14,
	VMCI_RPC_PRIVILEGED = 15,
	VMCI_RPC_UNPRIVILEGED = 16,
	VMCI_RESOURCE_MAX = 17,
};
/*
 * struct vmci_handle - Ownership information structure
 * @context:    The VMX context ID.
 * @resource:   The resource ID (used for locating in resource hash).
 *
 * The vmci_handle structure is used to track resources used within
 * vmw_vmci.
 */
struct vmci_handle {
	u32 context;
	u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
	(struct vmci_handle){ .context = _cid, .resource = _rid }
static inline bool vmci_handle_is_equal(struct vmci_handle h1,
					struct vmci_handle h2)
{
	return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
	.context = VMCI_INVALID_ID,
	.resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
	return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}
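/*
 * Illustrative sketch only (not part of this header's API): building and
 * testing a handle with the helpers above.  The resource ID 1 is an
 * arbitrary example value; VMCI_HOST_CONTEXT_ID is defined further below.
 *
 *	struct vmci_handle h = vmci_make_handle(VMCI_HOST_CONTEXT_ID, 1);
 *
 *	vmci_handle_is_invalid(h);                    // false
 *	vmci_handle_is_equal(h, VMCI_INVALID_HANDLE); // false
 */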
/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID   VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID  VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
	.context = VMCI_ANON_SRC_CONTEXT_ID,
	.resource = VMCI_ANON_SRC_RESOURCE_ID
};

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT ((u32) 16)
/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID 0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services.  This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID 1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID 2

#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
				  (_cid) > VMCI_HOST_CONTEXT_ID)
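/*
 * Illustrative sketch only: how VMCI_CONTEXT_IS_VM() classifies a few
 * context IDs under the definitions above.
 *
 *	VMCI_CONTEXT_IS_VM(VMCI_HYPERVISOR_CONTEXT_ID); // false
 *	VMCI_CONTEXT_IS_VM(VMCI_HOST_CONTEXT_ID);       // false
 *	VMCI_CONTEXT_IS_VM(3);                          // true (greater than the host CID)
 *	VMCI_CONTEXT_IS_VM(VMCI_INVALID_ID);            // false
 */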
/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0
/*
 * Error codes.
 */
enum {
	VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
	VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
	VMCI_SUCCESS_LAST_DETACH = 3,
	VMCI_SUCCESS_ACCESS_GRANTED = 2,
	VMCI_SUCCESS_ENTRY_DEAD = 1,
	VMCI_SUCCESS = 0,
	VMCI_ERROR_INVALID_RESOURCE = (-1),
	VMCI_ERROR_INVALID_ARGS = (-2),
	VMCI_ERROR_NO_MEM = (-3),
	VMCI_ERROR_DATAGRAM_FAILED = (-4),
	VMCI_ERROR_MORE_DATA = (-5),
	VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
	VMCI_ERROR_NO_ACCESS = (-7),
	VMCI_ERROR_NO_HANDLE = (-8),
	VMCI_ERROR_DUPLICATE_ENTRY = (-9),
	VMCI_ERROR_DST_UNREACHABLE = (-10),
	VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
	VMCI_ERROR_INVALID_PRIV = (-12),
	VMCI_ERROR_GENERIC = (-13),
	VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
	VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
	VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
	VMCI_ERROR_NO_PROCESS = (-17),
	VMCI_ERROR_NO_DATAGRAM = (-18),
	VMCI_ERROR_NO_RESOURCES = (-19),
	VMCI_ERROR_UNAVAILABLE = (-20),
	VMCI_ERROR_NOT_FOUND = (-21),
	VMCI_ERROR_ALREADY_EXISTS = (-22),
	VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
	VMCI_ERROR_INVALID_SIZE = (-24),
	VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
	VMCI_ERROR_TIMEOUT = (-26),
	VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
	VMCI_ERROR_INCORRECT_IRQL = (-28),
	VMCI_ERROR_EVENT_UNKNOWN = (-29),
	VMCI_ERROR_OBSOLETE = (-30),
	VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
	VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
	VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
	VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
	VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
	VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
	VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
	VMCI_ERROR_MODULE_NOT_LOADED = (-38),
	VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
	VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
	VMCI_ERROR_WOULD_BLOCK = (-41),

	/* VMCI clients should return error code within this range */
	VMCI_ERROR_CLIENT_MIN = (-500),
	VMCI_ERROR_CLIENT_MAX = (-550),

	/* Internal error codes. */
	VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};
/* VMCI reserved events. */
enum {
	/* Only applicable to guest endpoints */
	VMCI_EVENT_CTX_ID_UPDATE = 0,

	/* Applicable to guest and host */
	VMCI_EVENT_CTX_REMOVED = 1,

	/* Only applicable to guest endpoints */
	VMCI_EVENT_QP_RESUMED = 2,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_ATTACH = 3,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_DETACH = 4,

	/*
	 * Applicable to VMX and vmk.  On vmk,
	 * this event has the Context payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_ON = 5,

	/*
	 * Applicable to VMX and vmk.  Same as
	 * above for the payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_OFF = 6,
	VMCI_EVENT_MAX = 7,
};
/*
 * Of the above events, a few are reserved for use in the VMX, and
 * other endpoints (guest and host kernel) should not use them.  For
 * the rest of the events, we allow both host and guest endpoints to
 * subscribe to them, to maintain the same API for host and guest
 * endpoints.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
				      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
				  !VMCI_EVENT_VALID_VMX(_event))
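/*
 * Illustrative sketch only: how the validity macros above classify events.
 *
 *	VMCI_EVENT_VALID(VMCI_EVENT_QP_PEER_ATTACH);    // true
 *	VMCI_EVENT_VALID(VMCI_EVENT_MEM_ACCESS_ON);     // false, VMX-only
 *	VMCI_EVENT_VALID_VMX(VMCI_EVENT_MEM_ACCESS_ON); // true
 */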
/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0
/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint).  An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
	VMCI_NO_PRIVILEGE_FLAGS = 0,
	VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
	VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
	VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
				    VMCI_PRIVILEGE_FLAG_TRUSTED),
	VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
	VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
	VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
/*
 * Increment major version when you make an incompatible change.
 * Compatibility goes both ways (old driver with new executable
 * as well as new driver with old executable).
 */

/* Never change VMCI_VERSION_SHIFT_WIDTH */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor) \
	((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v) ((u16) (v))
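/*
 * Illustrative sketch only: a version packs the major number into the upper
 * 16 bits and the minor number into the lower 16 bits.  The values below are
 * hypothetical.
 *
 *	u32 v = VMCI_MAKE_VERSION(11, 2);   // 0x000b0002
 *
 *	VMCI_VERSION_MAJOR(v);              // 11
 *	VMCI_VERSION_MINOR(v);              // 2
 */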
/*
 * VMCI_VERSION is always the current version.  Subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX).
 *
 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
 * communication.
 *
 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
 * support.
 *
 * VMCI_VERSION_HOSTQP: This version introduced host end point support
 * for hosted products.
 *
 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
 * support for host end-points.
 *
 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
 * represent the version of a VMX which doesn't call into the driver
 * with ioctl VERSION2 and thus doesn't establish its version with the
 * driver.
 */

#define VMCI_VERSION                VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM         VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY         VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP         VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP      VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2       VMCI_MAKE_VERSION(1, 0)
#define VMCI_SOCKETS_MAKE_VERSION(_p) \
	((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
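/*
 * Illustrative sketch only: the macro above packs the first two array
 * elements into the top two octets and the third element into the low bits.
 * The input values below are hypothetical.
 *
 *	u8 p[] = { 1, 2, 3 };
 *
 *	VMCI_SOCKETS_MAKE_VERSION(p);   // 0x01020003
 */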
/*
 * The VMCI IOCTLs.  We use identity code 7, as noted in ioctl-number.h, and
 * we start at sequence 9f.  This gives us the same values that our shipping
 * products use, starting at 1951, provided we leave out the direction and
 * structure size.  Note that VMMon occupies the block following us, starting
 * at 2001.
 */
#define IOCTL_VMCI_VERSION			_IO(7, 0x9f)	/* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT			_IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA		_IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE		_IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE	_IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2			_IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC		_IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE	_IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH		_IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND		_IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE		_IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION		_IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION	_IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE		_IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE		_IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID		_IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION		_IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE		_IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID	_IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY			_IO(7, 0xcb)	/* 1995 */
/*IOCTL_VMMON_START				_IO(7, 0xd1)*/	/* 2001 */
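/*
 * Illustrative sketch only: with no direction and no size encoded, the low
 * 16 bits of an _IO() number are (type << 8) | nr, which is where the legacy
 * decimal values noted above come from:
 *
 *	_IO(7, 0x9f) & 0xffff == (7 << 8) | 0x9f == 0x079f == 1951
 *	_IO(7, 0xcb) & 0xffff == (7 << 8) | 0xcb == 0x07cb == 1995
 *	_IO(7, 0xd1) & 0xffff == (7 << 8) | 0xd1 == 0x07d1 == 2001
 */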
/*
 * struct vmci_queue_header - VMCI Queue Header information.
 *
 * A Queue cannot stand by itself as designed.  Each Queue's header
 * contains a pointer into itself (the producer_tail) and into its peer
 * (consumer_head).  The reason for the separation is one of
 * accessibility: Each end-point can modify two things: where the next
 * location to enqueue is within its produce_q (producer_tail); and
 * where the next dequeue location is in its consume_q (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to
 * guest; NOTE that in the host both queue headers are mapped r/w).
 * But, each end-point needs read access to both Queue header
 * structures in order to determine how much space is used (or left)
 * in the Queue.  This is because for an end-point to know how full
 * its produce_q is, it needs to use the consumer_head that points into
 * the produce_q but -that- consumer_head is in the Queue header for
 * that end-point's consume_q.
 *
 * Thoroughly confused?  Sorry.
 *
 * producer_tail: the point to enqueue new entrants.  When you approach
 * a line in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued.  In other words, who is next in line is he who is at the
 * head of the line.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head in which case consumer_head does not point to a valid
 * byte of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head
 * then the produce_q is empty.
 */
struct vmci_queue_header {
	/* All fields are 64bit and aligned. */
	struct vmci_handle handle;	/* Identifier. */
	atomic64_t producer_tail;	/* Offset in this queue. */
	atomic64_t consumer_head;	/* Offset in peer queue. */
};
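/*
 * Illustrative sketch only (hypothetical numbers): for a produce_q of
 * size 16, reading its producer_tail and the peer's consumer_head as
 * described above:
 *
 *	tail == 3, head == 3  ->  queue empty, 0 bytes enqueued
 *	tail == 9, head == 3  ->  6 bytes enqueued, 9 bytes still free
 *	tail == 2, head == 3  ->  15 bytes enqueued, queue full
 *
 * At most size - 1 bytes can ever be in flight, because the tail always
 * points at an empty byte.
 */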
/*
 * struct vmci_datagram - Base struct for vmci datagrams.
 * @dst:        A vmci_handle that tracks the destination of the datagram.
 * @src:        A vmci_handle that tracks the source of the datagram.
 * @payload_size:       The size of the payload.
 *
 * vmci_datagram structs are used when sending vmci datagrams.  They include
 * the necessary source and destination information to properly route
 * the information along with the size of the package.
 */
struct vmci_datagram {
	struct vmci_handle dst;
	struct vmci_handle src;
	u64 payload_size;
};
/*
 * Second flag is for creating a well-known handle instead of a per context
 * handle.  Next flag is for deferring datagram delivery, so that the
 * datagram callback is invoked in a delayed context (not interrupt context).
 */
#define VMCI_FLAG_DG_NONE          0
#define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
#define VMCI_FLAG_ANYCID_DG_HND    BIT(1)
#define VMCI_FLAG_DG_DELAYED_CB    BIT(2)

/*
 * Maximum supported size of a VMCI datagram for routable datagrams.
 * Datagrams going to the hypervisor are allowed to be larger.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
				  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
				      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
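/*
 * Illustrative sketch only (hypothetical payload size): struct vmci_datagram
 * is 24 bytes (two 8-byte handles plus a u64), so for a routable datagram dg
 * with a 13-byte payload:
 *
 *	VMCI_MAX_DG_SIZE              // 17 * 4096 == 69632
 *	VMCI_MAX_DG_PAYLOAD_SIZE      // 69632 - 24 == 69608
 *	VMCI_DG_SIZE(dg)              // 24 + 13 == 37
 *	VMCI_DG_SIZE_ALIGNED(dg)      // 37 rounded up to 8 == 40
 */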
struct vmci_event_payload_qp {
	struct vmci_handle handle;  /* queue_pair handle. */
	u32 peer_id;		    /* Context id of attaching/detaching VM. */
	u32 _pad;
};

/* Flags for VMCI queue_pair API. */
enum {
	/* Fail alloc if QP not created by peer. */
	VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

	/* Only allow attaches from local context. */
	VMCI_QPFLAG_LOCAL = 1 << 1,

	/* Host won't block when guest is quiesced. */
	VMCI_QPFLAG_NONBLOCK = 1 << 2,

	/* Pin data pages in ESX.  Used with NONBLOCK */
	VMCI_QPFLAG_PINNED = 1 << 3,

	/* Update the following flag when adding new flags. */
	VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
			     VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

	/* Convenience flags */
	VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
	VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};
/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context.  We define this
 * limit on event datagrams from the hypervisor to guard against DoS attack
 * from a malicious VM which could repeatedly attach to and detach from a queue
 * pair, causing events to be queued at the destination VM.  However, the rate
 * at which such events can be generated is small since it requires a VM exit
 * and handling of queue pair attach/detach call at the hypervisor.  Event
 * datagrams may be queued up at the destination VM if it has interrupts
 * disabled or if it is not draining events for some other reason.  1024
 * datagrams is a grossly conservative estimate of the time for which
 * interrupts may be disabled in the destination VM, but at the same time does
 * not exacerbate the memory pressure problem on the host by much (size of each
 * event datagram is small).
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
	(VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
	 1024 * (sizeof(struct vmci_datagram) + \
		 sizeof(struct vmci_event_data_max)))
/*
 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
 * hypervisor resources.  Struct size is 16 bytes.  All fields in struct are
 * aligned to their natural alignment.
 */
struct vmci_resource_query_hdr {
	struct vmci_datagram hdr;
	u32 num_resources;
	u32 _padding;
};
/*
 * Convenience struct for negotiating vectors.  Must match layout of
 * VMCIResourceQueryHdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
	u32 num_resources;
	u32 _padding;
	u32 resources[1];
};
/*
 * The maximum number of resources that can be queried using
 * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
 * bits of a positive return value.  Negative values are reserved for
 * errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE \
	(sizeof(struct vmci_resource_query_hdr) + \
	 sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
/*
 * Struct used for setting the notification bitmap.  All fields in
 * struct are aligned to their natural alignment.
 */
struct vmci_notify_bm_set_msg {
	struct vmci_datagram hdr;
	union {
		u32 bitmap_ppn32;
		u64 bitmap_ppn64;
	};
};
/*
 * Struct used for linking a doorbell handle with an index in the
 * notify bitmap.  All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_link_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u64 notify_idx;
};

/*
 * Struct used for unlinking a doorbell handle from an index in the
 * notify bitmap.  All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_unlink_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/*
 * Struct used for generating a notification on a doorbell handle.  All
 * fields in struct are aligned to their natural alignment.
 */
struct vmci_doorbell_notify_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};
/*
 * This struct is used to contain data for events.  Size of this struct is a
 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
 */
struct vmci_event_data {
	u32 event;	/* 4 bytes. */
	u32 _pad;
	/* Event payload is put here. */
};
/*
 * Define the different VMCI_EVENT payload data types here.  All structs must
 * be a multiple of 8 bytes, and fields must be aligned to their natural
 * alignment.
 */
struct vmci_event_payld_ctx {
	u32 context_id;	/* 4 bytes. */
	u32 _pad;
};

struct vmci_event_payld_qp {
	struct vmci_handle handle;  /* queue_pair handle. */
	u32 peer_id;		    /* Context id of attaching/detaching VM. */
	u32 _pad;
};
/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest.  If adding a new event
 * payload type above, add it to the following struct too (inside the
 * union).
 */
struct vmci_event_data_max {
	struct vmci_event_data event_data;
	union {
		struct vmci_event_payld_ctx context_payload;
		struct vmci_event_payld_qp qp_payload;
	} ev_data_payload;
};
/*
 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
 * VMCI_EVENT_HANDLER messages.  Struct size is 32 bytes.  All fields
 * in struct are aligned to their natural alignment.
 */
struct vmci_event_msg {
	struct vmci_datagram hdr;

	/* Has event type and payload. */
	struct vmci_event_data event_data;

	/* Payload gets put here. */
};

/* Event with context payload. */
struct vmci_event_ctx {
	struct vmci_event_msg msg;
	struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
	struct vmci_event_msg msg;
	struct vmci_event_payld_qp payload;
};
/*
 * Structs used for queue_pair alloc and detach messages.  We align fields of
 * these structs to 64bit boundaries.
 */
struct vmci_qp_alloc_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 num_ppns;

	/* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};
/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB BIT(0)

typedef void (*vmci_callback) (void *client_data);

/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by
 * VMCI.  It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
				      struct vmci_datagram *msg);

/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
			       void *client_data);
/*
 * We use the following inline function to access the payload data
 * associated with an event data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
	return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
	return (void *)vmci_event_data_const_payload(ev_data);
}
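/*
 * Illustrative sketch only: given a received event message (e_msg is a
 * hypothetical pointer to a struct vmci_event_msg), the accessor above
 * returns the bytes that immediately follow the event_data field, e.g. a
 * struct vmci_event_payld_qp for a peer-attach event:
 *
 *	if (e_msg->event_data.event == VMCI_EVENT_QP_PEER_ATTACH) {
 *		struct vmci_event_payld_qp *p =
 *			vmci_event_data_payload(&e_msg->event_data);
 *		// p->handle and p->peer_id describe the attaching peer
 *	}
 */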
/*
 * Helper to read a value from a head or tail pointer.  For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case.  Also, doing an
 * atomic64_read on X86_32 uniprocessor systems may be implemented
 * as a non locked cmpxchg8b, that may end up overwriting updates done
 * by the VMCI device to the memory location.  On 32bit SMP, the lock
 * prefix will be used, so correctness isn't an issue, but using a
 * 64bit operation still adds unnecessary overhead.
 */
static inline u64 vmci_q_read_pointer(atomic64_t *var)
{
#if defined(CONFIG_X86_32)
	return atomic_read((atomic_t *)var);
#else
	return atomic64_read(var);
#endif
}
/*
 * Helper to set the value of a head or tail pointer.  For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case.  On 32bit SMP, using a
 * locked cmpxchg8b adds unnecessary overhead.
 */
static inline void vmci_q_set_pointer(atomic64_t *var,
				      u64 new_val)
{
#if defined(CONFIG_X86_32)
	return atomic_set((atomic_t *)var, (u32)new_val);
#else
	return atomic64_set(var, new_val);
#endif
}
/*
 * Helper to add a given offset to a head or tail pointer.  Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(atomic64_t *var,
					size_t add,
					u64 size)
{
	u64 new_val = vmci_q_read_pointer(var);

	if (new_val >= size - add)
		new_val -= size;

	new_val += add;

	vmci_q_set_pointer(var, new_val);
}
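/*
 * Illustrative sketch only (hypothetical numbers): the net effect is
 * (old + add) modulo size, relying on well-defined u64 wrap-around for the
 * intermediate subtraction.  With size == 16:
 *
 *	old == 5,  add == 4  ->  5 < 12,   result 9
 *	old == 14, add == 4  ->  14 >= 12, result (14 + 4) - 16 == 2
 */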
/*
 * Helper routine to get the Producer Tail from the supplied queue.
 */
static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->producer_tail);
}

/*
 * Helper routine to get the Consumer Head from the supplied queue.
 */
static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->consumer_head);
}
/*
 * Helper routine to increment the Producer Tail.  Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the tail itself.
 */
static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 * Helper routine to increment the Consumer Head.  Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the head itself.
 */
static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}
/*
 * Helper routine for getting the head and the tail pointer for a queue.
 * Both the VMCIQueues are needed to get both the pointers for one queue.
 */
static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
			   const struct vmci_queue_header *consume_q_header,
			   u64 *producer_tail,
			   u64 *consumer_head)
{
	if (producer_tail)
		*producer_tail = vmci_q_header_producer_tail(produce_q_header);

	if (consumer_head)
		*consumer_head = vmci_q_header_consumer_head(consume_q_header);
}

static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
				      const struct vmci_handle handle)
{
	q_header->handle = handle;
	atomic64_set(&q_header->producer_tail, 0);
	atomic64_set(&q_header->consumer_head, 0);
}
/*
 * Finds available free space in a produce queue to enqueue more
 * data or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
			 const struct vmci_queue_header *consume_q_header,
			 const u64 produce_q_size)
{
	u64 tail;
	u64 head;
	u64 free_space;

	tail = vmci_q_header_producer_tail(produce_q_header);
	head = vmci_q_header_consumer_head(consume_q_header);

	if (tail >= produce_q_size || head >= produce_q_size)
		return VMCI_ERROR_INVALID_SIZE;

	/*
	 * Deduct 1 to avoid tail becoming equal to head which causes
	 * ambiguity.  If head and tail are equal it means that the
	 * queue is empty.
	 */
	if (tail >= head)
		free_space = produce_q_size - (tail - head) - 1;
	else
		free_space = head - tail - 1;

	return free_space;
}
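/*
 * Illustrative sketch only (hypothetical numbers): with produce_q_size == 16,
 *
 *	tail == 10, head == 4   ->  16 - (10 - 4) - 1 == 9 bytes free
 *	tail == 4,  head == 10  ->  10 - 4 - 1 == 5 bytes free
 *	tail == 3,  head == 4   ->  0 bytes free (queue full)
 */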
/*
 * vmci_q_header_free_space() does all the heavy lifting of
 * determining the number of free bytes in a Queue.  This routine
 * then subtracts that size from the full size of the Queue so
 * the caller knows how many bytes are ready to be dequeued.
 *
 * On success, available data size in bytes (up to MAX_INT64).
 * On failure, appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
			const struct vmci_queue_header *produce_q_header,
			const u64 consume_q_size)
{
	s64 free_space;

	free_space = vmci_q_header_free_space(consume_q_header,
					      produce_q_header, consume_q_size);
	if (free_space < VMCI_SUCCESS)
		return free_space;

	return consume_q_size - free_space - 1;
}
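/*
 * Illustrative sketch only (hypothetical numbers): with consume_q_size == 16
 * and 6 bytes already enqueued by the peer, vmci_q_header_free_space()
 * reports 9 free bytes, so this routine returns 16 - 9 - 1 == 6 bytes ready
 * to be dequeued.
 */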
#endif /* _VMW_VMCI_DEF_H_ */