// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * Passed in during VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	  /* User VA. */
	u64 consume_page_file;	  /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;
	u32 _pad;
};
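
/*
 * Illustrative sketch (not part of the driver): an older VMX, i.e. one with
 * user_version < VMCI_VERSION_NOVMVM, would fill this block before issuing
 * IOCTL_VMCI_QUEUEPAIR_ALLOC roughly as follows; the fd, handle and sizes
 * here are hypothetical.
 *
 *	struct vmci_qp_alloc_info_vmvm info = {
 *		.handle       = qp_handle,	// handle chosen by the VMX
 *		.peer         = peer_cid,
 *		.flags        = 0,
 *		.produce_size = 65536,
 *		.consume_size = 65536,
 *	};
 *	if (ioctl(vmci_fd, IOCTL_VMCI_QUEUEPAIR_ALLOC, &info) == 0)
 *		// info.result holds the VMCI status written by the host
 *		;
 */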

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

int vmci_host_users(void)
{
	return atomic_read(&vmci_host_active_users);
}

/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented when
		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
		 * ioctl above, and decremented here when the context is
		 * destroyed on file close.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context;
	__poll_t mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/*
		 * Read context only if ct_type == VMCIOBJ_CONTEXT to make
		 * sure that context is initialized.
		 */
		context = vmci_host_dev->context;

		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = EPOLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but *retval carries the copy_to_user() failure.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);

	*retval = copy_to_user(user_buf_uva,
			       vmci_handle_arr_get_handles
			       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}
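
/*
 * Caller contract, illustrated with hypothetical names: the return value
 * describes the sizing of the copy, while *retval carries the copy_to_user()
 * outcome, so callers can tell "buffer too small" apart from a faulting
 * user pointer.
 *
 *	u64 size = buf_capacity;	// caller-supplied capacity in bytes
 *	int retval = 0;
 *	int status = drv_cp_harray_to_user(ubuf, &size, handles, &retval);
 *	// status == VMCI_ERROR_MORE_DATA: nothing copied, grow the buffer
 *	// status == VMCI_SUCCESS && retval != 0: the user buffer faulted
 *	// status == VMCI_SUCCESS && retval == 0: size bytes were copied
 */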

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we are
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));

	/*
	 * Lock physical page backing a given user VA.
	 */
	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

	/*
	 * Map the locked page and set up notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}
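
/*
 * Illustrative sketch (not part of the driver): the VMX arms this mechanism
 * by passing the user VA of a one-byte flag through IOCTL_VMCI_SET_NOTIFY;
 * the fd and variable names below are hypothetical.
 *
 *	volatile unsigned char notify_flag = 0;
 *	struct vmci_set_notify_info info = {
 *		.notify_uva = (uintptr_t)&notify_flag,
 *	};
 *	ioctl(vmci_fd, IOCTL_VMCI_SET_NOTIFY, &info);
 *	// After a successful call the host writes notify_flag through the
 *	// kmap()'d page whenever the context is signalled.
 */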

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0 tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required that the VMX and the VMCI kernel module
	 * be version sync'd. All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */

	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}
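
/*
 * Worked example of the rules above (no specific version numbers assumed):
 *
 *	user_version sent			value returned
 *	0, or IOCTL_VMCI_VERSION2 never sent	VMCI_VERSION (our version)
 *	older than VMCI_VERSION_HOSTQP		the caller's own version
 *	VMCI_VERSION_HOSTQP or newer		VMCI_VERSION (our version)
 */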

#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy cid to userlevel; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	vmci_call_vsock_callback(true);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}
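
/*
 * Illustrative sketch (not part of the driver): a VMX sends a datagram by
 * pointing send_info.addr at a fully formed struct vmci_datagram followed by
 * its payload; the buffer layout, fd and handle names here are hypothetical.
 *
 *	struct {
 *		struct vmci_datagram hdr;
 *		unsigned char payload[64];
 *	} msg = {
 *		.hdr.dst = dst_handle,
 *		.hdr.src = src_handle,
 *		.hdr.payload_size = sizeof(msg.payload),
 *	};
 *	struct vmci_datagram_snd_rcv_info send_info = {
 *		.addr = (uintptr_t)&msg,
 *		.len  = sizeof(msg),	// must equal VMCI_DG_SIZE(&msg.hdr)
 *	};
 *	ioctl(vmci_fd, IOCTL_VMCI_DATAGRAM_SEND, &send_info);
 *	// send_info.result then holds the vmci_datagram_dispatch() status
 */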

static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
						vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					 vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller. Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns. And then,
	 * only if the ioctl() result indicates no error. We send up the
	 * SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms. For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (anymore)
	 * data into the queue_pair. That means an additional test in the
	 * VMCI_Enqueue() code path. Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block. So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed). So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't. Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						   &get_info.buf_size, &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
			      set_info.buf_size);
	if (IS_ERR(cpt_buf))
		return PTR_ERR(cpt_buf);

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

	kfree(cpt_buf);
	return retval;
}

static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}
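
/*
 * Illustrative sketch (not part of the driver): the VMX drains pending
 * doorbell and queue-pair notifications into two user buffers; the fd and
 * buffer sizes below are hypothetical.
 *
 *	struct vmci_handle db_buf[16], qp_buf[16];
 *	struct vmci_ctx_notify_recv_info info = {
 *		.db_handle_buf_uva  = (uintptr_t)db_buf,
 *		.db_handle_buf_size = sizeof(db_buf),
 *		.qp_handle_buf_uva  = (uintptr_t)qp_buf,
 *		.qp_handle_buf_size = sizeof(qp_buf),
 *	};
 *	ioctl(vmci_fd, IOCTL_VMCI_NOTIFICATIONS_RECEIVE, &info);
 *	// On success the *_size fields are rewritten with the number of
 *	// bytes actually copied by drv_cp_harray_to_user().
 */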

static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);	\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
	}

	return -EINVAL;

#undef VMCI_DO_IOCTL
}
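
/*
 * Illustrative sketch (not part of the driver) of the typical call sequence
 * a VMX performs against this interface; error handling is omitted and the
 * fd name and requested cid are hypothetical.
 *
 *	int vmci_fd = open("/dev/vmci", O_RDWR);
 *	int version = VMCI_VERSION;
 *	ioctl(vmci_fd, IOCTL_VMCI_VERSION2, &version);	// negotiate version
 *	struct vmci_init_blk init_block = { .cid = requested_cid, .flags = 0 };
 *	ioctl(vmci_fd, IOCTL_VMCI_INIT_CONTEXT, &init_block);
 *	// init_block.cid now holds the context id assigned by vmci_ctx_create()
 *	// ... datagram and queue pair ioctls, poll(vmci_fd) for pending work ...
 *	close(vmci_fd);		// vmci_host_close() tears the context down
 */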

static const struct file_operations vmuser_fops = {
	.owner = THIS_MODULE,
	.open = vmci_host_open,
	.release = vmci_host_close,
	.poll = vmci_host_poll,
	.unlocked_ioctl = vmci_host_unlocked_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name = "vmci",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &vmuser_fops,
};

int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
				VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
				-1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

void __exit vmci_host_exit(void)
{
	vmci_host_device_initialized = false;

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}