/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>
/* Register offsets. */
#define VMCI_STATUS_ADDR      0x00
#define VMCI_CONTROL_ADDR     0x04
#define VMCI_ICR_ADDR         0x08
#define VMCI_IMR_ADDR         0x0c
#define VMCI_DATA_OUT_ADDR    0x10
#define VMCI_DATA_IN_ADDR     0x14
#define VMCI_CAPS_ADDR        0x18
#define VMCI_RESULT_LOW_ADDR  0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20

/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON 0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET       0x1
#define VMCI_CONTROL_INT_ENABLE  0x2
#define VMCI_CONTROL_INT_DISABLE 0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL     0x1
#define VMCI_CAPS_GUESTCALL     0x2
#define VMCI_CAPS_DATAGRAM      0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM     0x1
#define VMCI_ICR_NOTIFICATION 0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM     0x1
#define VMCI_IMR_NOTIFICATION 0x2
/* Interrupt type. */
enum {
	VMCI_INTR_TYPE_INTX = 0,
	VMCI_INTR_TYPE_MSI = 1,
	VMCI_INTR_TYPE_MSIX = 2,
};

/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2

/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
	VMCI_INTR_DATAGRAM = 0,
	VMCI_INTR_NOTIFICATION = 1,
};
/*
 * A single VMCI device has an upper limit of 128MB on the amount of
 * memory that can be used for queue pairs. Since each queue pair
 * consists of at least two pages, the memory limit also dictates the
 * number of queue pairs a guest can create.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
#define VMCI_MAX_GUEST_QP_COUNT  (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
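
/*
 * Worked example (illustrative, not from the original header): with a
 * 4 KiB PAGE_SIZE, VMCI_MAX_GUEST_QP_COUNT evaluates to
 * 128 MiB / 4 KiB / 2 = 16384 queue pairs.
 */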
/*
 * There can be at most PAGE_SIZE doorbells since there is one doorbell
 * per byte in the doorbell bitmap page.
 */
#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE

/*
 * Queues with pre-mapped data pages must be small, so that we don't pin
 * too much kernel memory (especially on vmkernel). We limit a queuepair to
 * 32 KB, or 16 KB per queue for symmetrical pairs.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles. If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 *
 * VMCI reserved hypervisor datagram resource IDs.
 */
enum {
	VMCI_RESOURCES_QUERY = 0,
	VMCI_GET_CONTEXT_ID = 1,
	VMCI_SET_NOTIFY_BITMAP = 2,
	VMCI_DOORBELL_LINK = 3,
	VMCI_DOORBELL_UNLINK = 4,
	VMCI_DOORBELL_NOTIFY = 5,

	/*
	 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
	 * obsoleted by the removal of VM to VM communication.
	 */
	VMCI_DATAGRAM_REQUEST_MAP = 6,
	VMCI_DATAGRAM_REMOVE_MAP = 7,
	VMCI_EVENT_SUBSCRIBE = 8,
	VMCI_EVENT_UNSUBSCRIBE = 9,
	VMCI_QUEUEPAIR_ALLOC = 10,
	VMCI_QUEUEPAIR_DETACH = 11,

	/*
	 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
	 * WS 7.0/7.1 and ESX 4.1
	 */
	VMCI_HGFS_TRANSPORT = 13,
	VMCI_UNITY_PBRPC_REGISTER = 14,
	VMCI_RPC_PRIVILEGED = 15,
	VMCI_RPC_UNPRIVILEGED = 16,
	VMCI_RESOURCE_MAX = 17,
};
/**
 * struct vmci_handle - Ownership information structure
 * @context:    The VMX context ID.
 * @resource:   The resource ID (used for locating in resource hash).
 *
 * The vmci_handle structure is used to track resources used within
 * vmw_vmci.
 */
struct vmci_handle {
	u32 context;
	u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
	(struct vmci_handle){ .context = _cid, .resource = _rid }

static inline bool vmci_handle_is_equal(struct vmci_handle h1,
					struct vmci_handle h2)
{
	return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
	.context = VMCI_INVALID_ID,
	.resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
	return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}
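
/*
 * Illustrative sketch (not part of the original header): building and
 * checking a handle. vmci_make_handle() expands to a compound literal,
 * so a handle can be created in a single expression. The function name
 * and the resource ID 4096 below are made up for this example.
 */
static inline bool vmci_handle_example(u32 cid)
{
	struct vmci_handle h = vmci_make_handle(cid, 4096);

	return !vmci_handle_is_invalid(h);
}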
/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID  VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
	.context = VMCI_ANON_SRC_CONTEXT_ID,
	.resource = VMCI_ANON_SRC_RESOURCE_ID
};
180 /* The lowest 16 context ids are reserved for internal use. */
181 #define VMCI_RESERVED_CID_LIMIT ((u32) 16)
184 * Hypervisor context id, used for calling into hypervisor
185 * supplied services from the VM.
187 #define VMCI_HYPERVISOR_CONTEXT_ID 0
190 * Well-known context id, a logical context that contains a set of
191 * well-known services. This context ID is now obsolete.
193 #define VMCI_WELL_KNOWN_CONTEXT_ID 1
196 * Context ID used by host endpoints.
198 #define VMCI_HOST_CONTEXT_ID 2
200 #define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
201 (_cid) > VMCI_HOST_CONTEXT_ID)
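
/*
 * For example (illustrative, not from the original header):
 * VMCI_CONTEXT_IS_VM(VMCI_HOST_CONTEXT_ID) is false, while any valid
 * context ID above VMCI_HOST_CONTEXT_ID, such as 1000, is treated as a
 * VM context.
 */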
/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0
/* VMCI error codes. */
enum {
	VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
	VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
	VMCI_SUCCESS_LAST_DETACH = 3,
	VMCI_SUCCESS_ACCESS_GRANTED = 2,
	VMCI_SUCCESS_ENTRY_DEAD = 1,
	VMCI_SUCCESS = 0,
	VMCI_ERROR_INVALID_RESOURCE = (-1),
	VMCI_ERROR_INVALID_ARGS = (-2),
	VMCI_ERROR_NO_MEM = (-3),
	VMCI_ERROR_DATAGRAM_FAILED = (-4),
	VMCI_ERROR_MORE_DATA = (-5),
	VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
	VMCI_ERROR_NO_ACCESS = (-7),
	VMCI_ERROR_NO_HANDLE = (-8),
	VMCI_ERROR_DUPLICATE_ENTRY = (-9),
	VMCI_ERROR_DST_UNREACHABLE = (-10),
	VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
	VMCI_ERROR_INVALID_PRIV = (-12),
	VMCI_ERROR_GENERIC = (-13),
	VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
	VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
	VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
	VMCI_ERROR_NO_PROCESS = (-17),
	VMCI_ERROR_NO_DATAGRAM = (-18),
	VMCI_ERROR_NO_RESOURCES = (-19),
	VMCI_ERROR_UNAVAILABLE = (-20),
	VMCI_ERROR_NOT_FOUND = (-21),
	VMCI_ERROR_ALREADY_EXISTS = (-22),
	VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
	VMCI_ERROR_INVALID_SIZE = (-24),
	VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
	VMCI_ERROR_TIMEOUT = (-26),
	VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
	VMCI_ERROR_INCORRECT_IRQL = (-28),
	VMCI_ERROR_EVENT_UNKNOWN = (-29),
	VMCI_ERROR_OBSOLETE = (-30),
	VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
	VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
	VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
	VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
	VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
	VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
	VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
	VMCI_ERROR_MODULE_NOT_LOADED = (-38),
	VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
	VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
	VMCI_ERROR_WOULD_BLOCK = (-41),

	/* VMCI clients should return error code within this range */
	VMCI_ERROR_CLIENT_MIN = (-500),
	VMCI_ERROR_CLIENT_MAX = (-550),

	/* Internal error codes. */
	VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};
/* VMCI reserved events. */
enum {
	/* Only applicable to guest endpoints */
	VMCI_EVENT_CTX_ID_UPDATE = 0,

	/* Applicable to guest and host */
	VMCI_EVENT_CTX_REMOVED = 1,

	/* Only applicable to guest endpoints */
	VMCI_EVENT_QP_RESUMED = 2,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_ATTACH = 3,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_DETACH = 4,

	/*
	 * Applicable to VMX and vmk. On vmk,
	 * this event has the Context payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_ON = 5,

	/*
	 * Applicable to VMX and vmk. Same as
	 * above for the payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_OFF = 6,
	VMCI_EVENT_MAX = 7,
};

/*
 * Of the above events, a few are reserved for use in the VMX, and
 * other endpoints (guest and host kernel) should not use them. For
 * the rest of the events, we allow both host and guest endpoints to
 * subscribe to them, to maintain the same API for host and guest
 * endpoints.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
				      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
				  !VMCI_EVENT_VALID_VMX(_event))
/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0
/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint). An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
	VMCI_NO_PRIVILEGE_FLAGS = 0,
	VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
	VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
	VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
				    VMCI_PRIVILEGE_FLAG_TRUSTED),
	VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
	VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
	VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};
/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023

/*
 * Driver version.
 *
 * Increment major version when you make an incompatible change.
 * Compatibility goes both ways (old driver with new executable
 * as well as new driver with old executable).
 */

/* Never change VMCI_VERSION_SHIFT_WIDTH */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor) \
	((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v) ((u16) (v))
/*
 * VMCI_VERSION is always the current version. Subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX).
 *
 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
 * communication.
 *
 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
 * support.
 *
 * VMCI_VERSION_HOSTQP: This version introduced host end point support
 * for hosted products.
 *
 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
 * support for host end-points.
 *
 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
 * represent the version of a VMX which doesn't call into the driver
 * with ioctl VERSION2 and thus doesn't establish its version with the
 * driver.
 */
#define VMCI_VERSION VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
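
/*
 * Worked example (illustrative, not from the original header):
 * VMCI_MAKE_VERSION(11, 0) packs major and minor into one value as
 * (11 << 16) | 0 == 0xb0000. VMCI_VERSION_MAJOR(0xb0000) shifts back
 * down to 11, and VMCI_VERSION_MINOR(0xb0000) truncates to the low
 * 16 bits, yielding 0.
 */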
#define VMCI_SOCKETS_MAKE_VERSION(_p) \
	((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
/*
 * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
 * we start at sequence 9f. This gives us the same values that our shipping
 * products use, starting at 1951, provided we leave out the direction and
 * structure size. Note that VMMon occupies the block following us, starting
 * at 2001.
 */
#define IOCTL_VMCI_VERSION _IO(7, 0x9f)	/* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA _IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE _IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE _IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2 _IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC _IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE _IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH _IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND _IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE _IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION _IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION _IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb)	/* 1995 */
/*IOCTL_VMMON_START _IO(7, 0xd1)*/	/* 2001 */
/*
 * struct vmci_queue_header - VMCI Queue Header information.
 *
 * A Queue cannot stand by itself as designed. Each Queue's header
 * contains a pointer into itself (the producer_tail) and into its peer
 * (consumer_head). The reason for the separation is one of
 * accessibility: Each end-point can modify two things: where the next
 * location to enqueue is within its produce_q (producer_tail); and
 * where the next dequeue location is in its consume_q (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to
 * guest; NOTE that in the host both queue headers are mapped r/w).
 * But, each end-point needs read access to both Queue header
 * structures in order to determine how much space is used (or left)
 * in the Queue. This is because for an end-point to know how full
 * its produce_q is, it needs to use the consumer_head that points into
 * the produce_q but -that- consumer_head is in the Queue header for
 * that end-point's consume_q.
 *
 * Thoroughly confused? Sorry.
 *
 * producer_tail: the point to enqueue new entrants. When you approach
 * a line in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued. In other words, whoever is next in line is at the head of
 * the line.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head in which case consumer_head does not point to a valid
 * byte of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head
 * then the produce_q is empty.
 */
struct vmci_queue_header {
	/* All fields are 64bit and aligned. */
	struct vmci_handle handle;	/* Identifier. */
	atomic64_t producer_tail;	/* Offset in this queue. */
	atomic64_t consumer_head;	/* Offset in peer queue. */
};
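
/*
 * Worked example (illustrative, not from the original header): for a
 * 4096-byte queue, tail == head means the queue is empty, and at most
 * 4095 bytes can ever be in flight, since one byte is sacrificed to
 * distinguish a full queue (tail one byte behind head, modulo size)
 * from an empty one.
 */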
/**
 * struct vmci_datagram - Base struct for vmci datagrams.
 * @dst:        A vmci_handle that tracks the destination of the datagram.
 * @src:        A vmci_handle that tracks the source of the datagram.
 * @payload_size:       The size of the payload.
 *
 * vmci_datagram structs are used when sending vmci datagrams. They include
 * the necessary source and destination information to properly route
 * the information along with the size of the package.
 */
struct vmci_datagram {
	struct vmci_handle dst;
	struct vmci_handle src;
	u64 payload_size;
};
/*
 * Datagram flags. VMCI_FLAG_WELLKNOWN_DG_HND creates a well-known
 * handle instead of a per context handle. VMCI_FLAG_DG_DELAYED_CB
 * defers datagram delivery, so that the datagram callback is invoked
 * in a delayed context (not interrupt context).
 */
#define VMCI_FLAG_DG_NONE          0
#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
#define VMCI_FLAG_ANYCID_DG_HND    0x2
#define VMCI_FLAG_DG_DELAYED_CB    0x4
/*
 * Maximum supported size of a VMCI datagram for routable datagrams.
 * Datagrams going to the hypervisor are allowed to be larger.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
				  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
				      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
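
/*
 * Worked example (illustrative, not from the original header): with a
 * 24-byte struct vmci_datagram (two 8-byte handles plus a u64), a
 * datagram with payload_size == 100 gives VMCI_DG_SIZE == 124 and
 * VMCI_DG_SIZE_ALIGNED == 128, the next multiple of 8.
 */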
struct vmci_event_payload_qp {
	struct vmci_handle handle;	/* queue_pair handle. */
	u32 peer_id;	/* Context id of attaching/detaching VM. */
	u32 _pad;
};
/* Flags for VMCI queue_pair API. */
enum {
	/* Fail alloc if QP not created by peer. */
	VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

	/* Only allow attaches from local context. */
	VMCI_QPFLAG_LOCAL = 1 << 1,

	/* Host won't block when guest is quiesced. */
	VMCI_QPFLAG_NONBLOCK = 1 << 2,

	/* Pin data pages in ESX. Used with NONBLOCK */
	VMCI_QPFLAG_PINNED = 1 << 3,

	/* Update the following flag when adding new flags. */
	VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
			     VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

	/* Convenience flags */
	VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
	VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};
/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context. We define this
 * limit on event datagrams from the hypervisor to guard against DoS attack
 * from a malicious VM which could repeatedly attach to and detach from a queue
 * pair, causing events to be queued at the destination VM. However, the rate
 * at which such events can be generated is small since it requires a VM exit
 * and handling of queue pair attach/detach call at the hypervisor. Event
 * datagrams may be queued up at the destination VM if it has interrupts
 * disabled or if it is not draining events for some other reason. 1024
 * datagrams is a grossly conservative estimate of the time for which
 * interrupts may be disabled in the destination VM, but at the same time does
 * not exacerbate the memory pressure problem on the host by much (size of each
 * event datagram is small).
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
	(VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
	 1024 * (sizeof(struct vmci_datagram) + \
		 sizeof(struct vmci_event_data_max)))
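
/*
 * Worked example (illustrative, not from the original header):
 * assuming 24 bytes each for struct vmci_datagram and struct
 * vmci_event_data_max, this evaluates to 2 * 68 KiB + 1024 * 48 bytes
 * = 136 KiB + 48 KiB = 184 KiB per context.
 */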
/*
 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
 * hypervisor resources. Struct size is 16 bytes. All fields in struct are
 * aligned to their natural alignment.
 */
struct vmci_resource_query_hdr {
	struct vmci_datagram hdr;
	u32 num_resources;
	u32 _padding;
};

/*
 * Convenience struct for negotiating vectors. Must match layout of
 * VMCIResourceQueryHdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
	u32 num_resources;
	u32 _padding;
	u32 resources[1];
};

/*
 * The maximum number of resources that can be queried using
 * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
 * bits of a positive return value. Negative values are reserved for
 * errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE \
	(sizeof(struct vmci_resource_query_hdr) + \
	 sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
/*
 * Struct used for setting the notification bitmap. All fields in
 * struct are aligned to their natural alignment.
 */
struct vmci_notify_bm_set_msg {
	struct vmci_datagram hdr;
	u32 bitmap_ppn;
	u32 _pad;
};

/*
 * Struct used for linking a doorbell handle with an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_link_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u64 notify_idx;
};

/*
 * Struct used for unlinking a doorbell handle from an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_unlink_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/*
 * Struct used for generating a notification on a doorbell handle. All
 * fields in struct are aligned to their natural alignment.
 */
struct vmci_doorbell_notify_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};
/*
 * This struct is used to contain data for events. Size of this struct is a
 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
 */
struct vmci_event_data {
	u32 event;	/* 4 bytes. */
	u32 _pad;
	/* Event payload is put here. */
};

/*
 * Define the different VMCI_EVENT payload data types here. All structs must
 * be a multiple of 8 bytes, and fields must be aligned to their natural
 * alignment.
 */
struct vmci_event_payld_ctx {
	u32 context_id;	/* 4 bytes. */
	u32 _pad;
};

struct vmci_event_payld_qp {
	struct vmci_handle handle;	/* queue_pair handle. */
	u32 peer_id;	/* Context id of attaching/detaching VM. */
	u32 _pad;
};

/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest. If adding a new event
 * payload type above, add it to the following struct too (inside the
 * union).
 */
struct vmci_event_data_max {
	struct vmci_event_data event_data;
	union {
		struct vmci_event_payld_ctx context_payload;
		struct vmci_event_payld_qp qp_payload;
	} ev_payload;
};
/*
 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
 * VMCI_EVENT_HANDLER messages. Struct size is 32 bytes. All fields
 * in struct are aligned to their natural alignment.
 */
struct vmci_event_msg {
	struct vmci_datagram hdr;

	/* Has event type and payload. */
	struct vmci_event_data event_data;

	/* Payload gets put here. */
};

/* Event with context payload. */
struct vmci_event_ctx {
	struct vmci_event_msg msg;
	struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
	struct vmci_event_msg msg;
	struct vmci_event_payld_qp payload;
};
/*
 * Structs used for queue_pair alloc and detach messages. We align fields of
 * these structs to 64bit boundaries.
 */
struct vmci_qp_alloc_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 num_ppns;

	/* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};
/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB 0x01

typedef void (*vmci_callback) (void *client_data);
/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by
 * VMCI. It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
				      struct vmci_datagram *msg);
/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
			       void *client_data);

/*
 * We use the following inline function to access the payload data
 * associated with an event data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
	return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
	return (void *)vmci_event_data_const_payload(ev_data);
}
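
/*
 * Illustrative sketch (not part of the original header): recovering a
 * typed payload from an event message, e.g. in a handler for
 * VMCI_EVENT_QP_PEER_ATTACH. The function name is made up for this
 * example.
 */
static inline u32 vmci_example_event_peer_id(struct vmci_event_msg *e_msg)
{
	struct vmci_event_payld_qp *e_payload =
		vmci_event_data_payload(&e_msg->event_data);

	return e_payload->peer_id;
}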
/*
 * Helper to read a value from a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. Also, doing an
 * atomic64_read on X86_32 uniprocessor systems may be implemented
 * as a non locked cmpxchg8b, that may end up overwriting updates done
 * by the VMCI device to the memory location. On 32bit SMP, the lock
 * prefix will be used, so correctness isn't an issue, but using a
 * 64bit operation still adds unnecessary overhead.
 */
static inline u64 vmci_q_read_pointer(atomic64_t *var)
{
#if defined(CONFIG_X86_32)
	return atomic_read((atomic_t *)var);
#else
	return atomic64_read(var);
#endif
}
/*
 * Helper to set the value of a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. On 32bit SMP, using a
 * locked cmpxchg8b adds unnecessary overhead.
 */
static inline void vmci_q_set_pointer(atomic64_t *var,
				      u64 new_val)
{
#if defined(CONFIG_X86_32)
	return atomic_set((atomic_t *)var, (u32)new_val);
#else
	return atomic64_set(var, new_val);
#endif
}
/*
 * Helper to add a given offset to a head or tail pointer. Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(atomic64_t *var,
				       size_t add,
				       u64 size)
{
	u64 new_val = vmci_q_read_pointer(var);

	if (new_val >= size - add)
		new_val -= size;

	new_val += add;

	vmci_q_set_pointer(var, new_val);
}
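
/*
 * Worked example (illustrative, not from the original header): with
 * size == 100, a pointer at 98 advanced by add == 5 satisfies
 * 98 >= 100 - 5, so 100 is subtracted before the addition and the
 * pointer wraps to (98 - 100) + 5 == 3, i.e. (98 + 5) mod 100.
 */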
/*
 * Helper routine to get the Producer Tail from the supplied queue.
 */
static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->producer_tail);
}

/*
 * Helper routine to get the Consumer Head from the supplied queue.
 */
static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->consumer_head);
}
/*
 * Helper routine to increment the Producer Tail. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the tail itself.
 */
static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 * Helper routine to increment the Consumer Head. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the head itself.
 */
static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}
/*
 * Helper routine for getting the head and the tail pointer for a queue.
 * Both the VMCIQueues are needed to get both the pointers for one queue.
 */
static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
			   const struct vmci_queue_header *consume_q_header,
			   u64 *producer_tail,
			   u64 *consumer_head)
{
	if (producer_tail)
		*producer_tail = vmci_q_header_producer_tail(produce_q_header);

	if (consumer_head)
		*consumer_head = vmci_q_header_consumer_head(consume_q_header);
}
static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
				      const struct vmci_handle handle)
{
	q_header->handle = handle;
	atomic64_set(&q_header->producer_tail, 0);
	atomic64_set(&q_header->consumer_head, 0);
}
/*
 * Finds available free space in a produce queue to enqueue more
 * data or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
			 const struct vmci_queue_header *consume_q_header,
			 const u64 produce_q_size)
{
	u64 tail;
	u64 head;
	u64 free_space;

	tail = vmci_q_header_producer_tail(produce_q_header);
	head = vmci_q_header_consumer_head(consume_q_header);

	if (tail >= produce_q_size || head >= produce_q_size)
		return VMCI_ERROR_INVALID_SIZE;

	/*
	 * Deduct 1 to avoid tail becoming equal to head which causes
	 * ambiguity. If head and tail are equal it means that the
	 * queue is empty.
	 */
	if (tail >= head)
		free_space = produce_q_size - (tail - head) - 1;
	else
		free_space = head - tail - 1;

	return free_space;
}
/*
 * vmci_q_header_free_space() does all the heavy lifting of
 * determining the number of free bytes in a Queue. This routine then
 * subtracts that size from the full size of the Queue so the caller
 * knows how many bytes are ready to be dequeued.
 * Results:
 * On success, available data size in bytes (up to MAX_INT64).
 * On failure, appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
			const struct vmci_queue_header *produce_q_header,
			const u64 consume_q_size)
{
	s64 free_space;

	free_space = vmci_q_header_free_space(consume_q_header,
					      produce_q_header, consume_q_size);
	if (free_space < VMCI_SUCCESS)
		return free_space;

	return consume_q_size - free_space - 1;
}
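
/*
 * Worked example (illustrative, not from the original header): for a
 * 4096-byte consume queue where vmci_q_header_free_space() reports
 * 1000 free bytes, the data ready to be dequeued is
 * 4096 - 1000 - 1 = 3095 bytes; the extra byte is the one reserved to
 * disambiguate a full queue from an empty one.
 */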
#endif /* _VMW_VMCI_DEF_H_ */