1 /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
3 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
5 * Copyright (C) 2007-2016 Oracle Corporation
8 #include <linux/device.h>
10 #include <linux/sched.h>
11 #include <linux/sizes.h>
12 #include <linux/slab.h>
13 #include <linux/vbox_err.h>
14 #include <linux/vbox_utils.h>
15 #include <linux/vmalloc.h>
16 #include "vboxguest_core.h"
17 #include "vboxguest_version.h"
19 /* Get the pointer to the first HGCM parameter. */
/*
 * The HGCM parameter array is laid out directly after the
 * struct vbg_ioctl_hgcm_call header in the same ioctl buffer;
 * these macros just do the pointer arithmetic past the header.
 */
20 #define VBG_IOCTL_HGCM_CALL_PARMS(a) \
21 ((struct vmmdev_hgcm_function_parameter *)( \
22 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
23 /* Get the pointer to the first HGCM parameter in a 32-bit request. */
24 #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
25 ((struct vmmdev_hgcm_function_parameter32 *)( \
26 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Number of vmap attempts made when reserving guest mappings for the host. */
28 #define GUEST_MAPPINGS_TRIES 5
31 * Reserves memory in which the VMM can relocate any guest mappings
32 * that are floating around.
34 * This operation is a little bit tricky since the VMM might not accept
35 * just any address because of address clashes between the three contexts
36 * it operates in, so we try several times.
38 * Failure to reserve the guest mappings is ignored.
40 * @gdev: The Guest extension device.
/*
 * NOTE(review): this extract is line-sampled; blank lines, braces and some
 * statements (e.g. NULL checks after allocations, 'break' in the retry loop,
 * 'int i, rc;' declarations, goto labels) are missing from view.
 */
42 static void vbg_guest_mappings_init(struct vbg_dev *gdev)
44 struct vmmdev_hypervisorinfo *req;
45 void *guest_mappings[GUEST_MAPPINGS_TRIES];
46 struct page **pages = NULL;
47 u32 size, hypervisor_size;
50 /* Query the required space. */
51 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
55 req->hypervisor_start = 0;
56 req->hypervisor_size = 0;
57 rc = vbg_req_perform(gdev, req);
62 * The VMM will report back if there is nothing it wants to map, like
63 * for instance in VT-x and AMD-V mode.
65 if (req->hypervisor_size == 0)
68 hypervisor_size = req->hypervisor_size;
69 /* Add 4M so that we can align the vmap to 4MiB as the host requires. */
70 size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
72 pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
/* One dummy page backs the whole read-only vmap; the host never reads it. */
76 gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
77 if (!gdev->guest_mappings_dummy_page)
80 for (i = 0; i < (size >> PAGE_SHIFT); i++)
81 pages[i] = gdev->guest_mappings_dummy_page;
84 * Try several times, the VMM might not accept some addresses because
85 * of address clashes between the three contexts.
87 for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
88 guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
89 VM_MAP, PAGE_KERNEL_RO);
90 if (!guest_mappings[i])
/* Offer the host a 4MiB-aligned address inside the fresh vmap. */
93 req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
94 req->header.rc = VERR_INTERNAL_ERROR;
95 req->hypervisor_size = hypervisor_size;
96 req->hypervisor_start =
97 (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
99 rc = vbg_req_perform(gdev, req);
101 gdev->guest_mappings = guest_mappings[i];
106 /* Free vmap's from failed attempts. */
108 vunmap(guest_mappings[i]);
110 /* On failure free the dummy-page backing the vmap */
111 if (!gdev->guest_mappings) {
112 __free_page(gdev->guest_mappings_dummy_page);
113 gdev->guest_mappings_dummy_page = NULL;
117 vbg_req_free(req, sizeof(*req));
122 * Undo what vbg_guest_mappings_init did.
124 * @gdev: The Guest extension device.
126 static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
128 struct vmmdev_hypervisorinfo *req;
/* Nothing to do if init never established a mapping. */
131 if (!gdev->guest_mappings)
135 * Tell the host that we're going to free the memory we reserved for
136 * it, then free it up. (Leak the memory if anything goes wrong here.)
/* A zeroed start/size pair tells the host to drop the reservation. */
138 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
142 req->hypervisor_start = 0;
143 req->hypervisor_size = 0;
145 rc = vbg_req_perform(gdev, req);
147 vbg_req_free(req, sizeof(*req));
/* NOTE(review): on host error we return here and deliberately leak the vmap. */
150 vbg_err("%s error: %d\n", __func__, rc);
154 vunmap(gdev->guest_mappings);
155 gdev->guest_mappings = NULL;
157 __free_page(gdev->guest_mappings_dummy_page);
158 gdev->guest_mappings_dummy_page = NULL;
162 * Report the guest information to the host.
163 * Return: 0 or negative errno value.
164 * @gdev: The Guest extension device.
166 static int vbg_report_guest_info(struct vbg_dev *gdev)
169 * Allocate and fill in the two guest info reports.
171 struct vmmdev_guest_info *req1 = NULL;
172 struct vmmdev_guest_info2 *req2 = NULL;
173 int rc, ret = -ENOMEM;
175 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
176 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
/* NOTE(review): the NULL-check / goto for failed allocations is not visible
 * in this line-sampled extract. */
180 req1->interface_version = VMMDEV_VERSION;
181 req1->os_type = VMMDEV_OSTYPE_LINUX26;
182 #if __BITS_PER_LONG == 64
183 req1->os_type |= VMMDEV_OSTYPE_X64;
/* (#endif dropped by the extract) */
186 req2->additions_major = VBG_VERSION_MAJOR;
187 req2->additions_minor = VBG_VERSION_MINOR;
188 req2->additions_build = VBG_VERSION_BUILD;
189 req2->additions_revision = VBG_SVN_REV;
190 /* (no features defined yet) */
191 req2->additions_features = 0;
/* NOTE(review): strlcpy's size argument is on a dropped line; strscpy would
 * be the modern kernel replacement — verify against the full source. */
192 strlcpy(req2->name, VBG_VERSION_STRING,
196 * There are two protocols here:
197 * 1. INFO2 + INFO1. Supported by >=3.2.51.
198 * 2. INFO1 and optionally INFO2. The old protocol.
200 * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
201 * if not supported by the VMMDev (message ordering requirement).
203 rc = vbg_req_perform(gdev, req2);
205 rc = vbg_req_perform(gdev, req1);
206 } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
207 rc = vbg_req_perform(gdev, req1);
/* Old protocol: INFO1 first, then INFO2 is optional. */
209 rc = vbg_req_perform(gdev, req2);
210 if (rc == VERR_NOT_IMPLEMENTED)
214 ret = vbg_status_code_to_errno(rc);
217 vbg_req_free(req2, sizeof(*req2));
218 vbg_req_free(req1, sizeof(*req1));
223 * Report the guest driver status to the host.
224 * Return: 0 or negative errno value.
225 * @gdev: The Guest extension device.
226 * @active: Flag whether the driver is now active or not.
228 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
230 struct vmmdev_guest_status *req;
233 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS)
237 req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
239 req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
241 req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
244 rc = vbg_req_perform(gdev, req);
245 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
248 vbg_req_free(req, sizeof(*req));
250 return vbg_status_code_to_errno(rc);
254 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
255 * Return: 0 or negative errno value.
256 * @gdev: The Guest extension device.
257 * @chunk_idx: Index of the chunk.
259 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
261 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
/* Page-pointer array for one chunk; kept in gdev for the matching deflate. */
265 pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
267 GFP_KERNEL | __GFP_NOWARN);
271 req->header.size = sizeof(*req);
273 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
/* Allocate the chunk's pages and hand their physical addresses to the host. */
275 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
276 pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
/* NOTE(review): per-page failure handling (goto to partial cleanup) is on
 * lines dropped from this extract. */
282 req->phys_page[i] = page_to_phys(pages[i]);
285 rc = vbg_req_perform(gdev, req);
287 vbg_err("%s error, rc: %d\n", __func__, rc);
288 ret = vbg_status_code_to_errno(rc);
292 gdev->mem_balloon.pages[chunk_idx] = pages;
/* Error path: give the pages back before freeing the array. */
298 __free_page(pages[i]);
305 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
306 * Return: 0 or negative errno value.
307 * @gdev: The Guest extension device.
308 * @chunk_idx: Index of the chunk.
310 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
312 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
313 struct page **pages = gdev->mem_balloon.pages[chunk_idx];
316 req->header.size = sizeof(*req);
317 req->inflate = false;
318 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
/* Tell the host which physical pages it must stop using. */
320 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
321 req->phys_page[i] = page_to_phys(pages[i]);
323 rc = vbg_req_perform(gdev, req);
325 vbg_err("%s error, rc: %d\n", __func__, rc);
326 return vbg_status_code_to_errno(rc);
/* Host released the pages; only now is it safe to free them. */
329 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
330 __free_page(pages[i]);
332 gdev->mem_balloon.pages[chunk_idx] = NULL;
338 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
339 * the host wants the balloon to be and adjust accordingly.
341 static void vbg_balloon_work(struct work_struct *work)
343 struct vbg_dev *gdev =
344 container_of(work, struct vbg_dev, mem_balloon.work);
345 struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
350 * Setting this bit means that we request the value from the host and
351 * change the guest memory balloon according to the returned value.
353 req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
354 rc = vbg_req_perform(gdev, req);
356 vbg_err("%s error, rc: %d)\n", __func__, rc);
361 * The host always returns the same maximum amount of chunks, so
/* First run: size the chunk bookkeeping array once, using devm so it is
 * freed with the device. */
364 if (!gdev->mem_balloon.max_chunks) {
365 gdev->mem_balloon.pages =
366 devm_kcalloc(gdev->dev, req->phys_mem_chunks,
367 sizeof(struct page **), GFP_KERNEL);
368 if (!gdev->mem_balloon.pages)
371 gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
374 chunks = req->balloon_chunks;
375 if (chunks > gdev->mem_balloon.max_chunks) {
376 vbg_err("%s: illegal balloon size %u (max=%u)\n",
377 __func__, chunks, gdev->mem_balloon.max_chunks);
/* Grow or shrink one chunk at a time, stopping on the first failure. */
381 if (chunks > gdev->mem_balloon.chunks) {
383 for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
384 ret = vbg_balloon_inflate(gdev, i);
388 gdev->mem_balloon.chunks++;
392 for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
393 ret = vbg_balloon_deflate(gdev, i);
397 gdev->mem_balloon.chunks--;
403 * Callback for heartbeat timer.
405 static void vbg_heartbeat_timer(struct timer_list *t)
407 struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
/* Ping the host, then re-arm ourselves for the next interval. */
409 vbg_req_perform(gdev, gdev->guest_heartbeat_req);
410 mod_timer(&gdev->heartbeat_timer,
411 msecs_to_jiffies(gdev->heartbeat_interval_ms));
415 * Configure the host to check guest's heartbeat
416 * and get heartbeat interval from the host.
417 * Return: 0 or negative errno value.
418 * @gdev: The Guest extension device.
419 * @enabled: Set true to enable guest heartbeat checks on host.
421 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
423 struct vmmdev_heartbeat *req;
426 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
430 req->enabled = enabled;
431 req->interval_ns = 0;
432 rc = vbg_req_perform(gdev, req);
/* The host fills in interval_ns; convert to ms for the timer. NOTE(review):
 * the conversion runs even when rc indicates failure — confirm intended. */
433 do_div(req->interval_ns, 1000000); /* ns -> ms */
434 gdev->heartbeat_interval_ms = req->interval_ns;
435 vbg_req_free(req, sizeof(*req));
437 return vbg_status_code_to_errno(rc);
441 * Initializes the heartbeat timer. This feature may be disabled by the host.
442 * Return: 0 or negative errno value.
443 * @gdev: The Guest extension device.
445 static int vbg_heartbeat_init(struct vbg_dev *gdev)
449 /* Make sure that heartbeat checking is disabled if we fail. */
450 ret = vbg_heartbeat_host_config(gdev, false);
/* Enable checking and fetch the host-chosen interval. */
454 ret = vbg_heartbeat_host_config(gdev, true);
/* Pre-allocate the heartbeat request; it is reused on every timer tick. */
458 gdev->guest_heartbeat_req = vbg_req_alloc(
459 sizeof(*gdev->guest_heartbeat_req),
460 VMMDEVREQ_GUEST_HEARTBEAT);
461 if (!gdev->guest_heartbeat_req)
464 vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
465 __func__, gdev->heartbeat_interval_ms);
/* Fire immediately; the timer callback re-arms itself. */
466 mod_timer(&gdev->heartbeat_timer, 0);
472 * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
473 * @gdev: The Guest extension device.
475 static void vbg_heartbeat_exit(struct vbg_dev *gdev)
/* Stop the timer first so no further heartbeat requests are issued. */
477 del_timer_sync(&gdev->heartbeat_timer);
478 vbg_heartbeat_host_config(gdev, false);
479 vbg_req_free(gdev->guest_heartbeat_req,
480 sizeof(*gdev->guest_heartbeat_req));
484 * Applies a change to the bit usage tracker.
485 * Return: true if the mask changed, false if not.
486 * @tracker: The bit usage tracker.
487 * @changed: The bits to change.
488 * @previous: The previous value of the bits.
/*
 * The tracker keeps a per-bit reference count across all sessions; a bit in
 * tracker->mask is set while at least one session has it set.
 */
490 static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
491 u32 changed, u32 previous)
493 bool global_change = false;
/* Process one changed bit per iteration (enclosing loop is on dropped lines). */
496 u32 bit = ffs(changed) - 1;
497 u32 bitmask = BIT(bit);
499 if (bitmask & previous) {
/* Bit was set before, so this change clears it: drop a reference. */
500 tracker->per_bit_usage[bit] -= 1;
501 if (tracker->per_bit_usage[bit] == 0) {
502 global_change = true;
503 tracker->mask &= ~bitmask;
/* Bit was clear before, so this change sets it: take a reference. */
506 tracker->per_bit_usage[bit] += 1;
507 if (tracker->per_bit_usage[bit] == 1) {
508 global_change = true;
509 tracker->mask |= bitmask;
516 return global_change;
520 * Init and termination worker for resetting the (host) event filter on the host
521 * Return: 0 or negative errno value.
522 * @gdev: The Guest extension device.
523 * @fixed_events: Fixed events (init time).
525 static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
528 struct vmmdev_mask *req;
531 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
/* Clear everything except the fixed events, which are OR-ed back in. */
535 req->not_mask = U32_MAX & ~fixed_events;
536 req->or_mask = fixed_events;
537 rc = vbg_req_perform(gdev, req);
539 vbg_err("%s error, rc: %d\n", __func__, rc);
541 vbg_req_free(req, sizeof(*req));
542 return vbg_status_code_to_errno(rc);
546 * Changes the event filter mask for the given session.
548 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
549 * do session cleanup. Takes the session spinlock.
551 * Return: 0 or negative errno value.
552 * @gdev: The Guest extension device.
553 * @session: The session.
554 * @or_mask: The events to add.
555 * @not_mask: The events to remove.
556 * @session_termination: Set if we're called by the session cleanup code.
557 * This tweaks the error handling so we perform
558 * proper session cleanup even if the host
561 static int vbg_set_session_event_filter(struct vbg_dev *gdev,
562 struct vbg_session *session,
563 u32 or_mask, u32 not_mask,
564 bool session_termination)
566 struct vmmdev_mask *req;
567 u32 changed, previous;
570 /* Allocate a request buffer before taking the spinlock */
571 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
573 if (!session_termination)
575 /* Ignore allocation failure, we must do session cleanup. */
578 mutex_lock(&gdev->session_mutex);
580 /* Apply the changes to the session mask. */
581 previous = session->event_filter;
582 session->event_filter |= or_mask;
583 session->event_filter &= ~not_mask;
585 /* If anything actually changed, update the global usage counters. */
586 changed = previous ^ session->event_filter;
590 vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
/* The host mask is the union of all sessions' masks plus the fixed events. */
591 or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
/* Skip the host round-trip when nothing changed or no req buffer is available. */
593 if (gdev->event_filter_host == or_mask || !req)
596 gdev->event_filter_host = or_mask;
597 req->or_mask = or_mask;
598 req->not_mask = ~or_mask;
599 rc = vbg_req_perform(gdev, req);
601 ret = vbg_status_code_to_errno(rc);
603 /* Failed, roll back (unless it's session termination time). */
/* U32_MAX forces the next caller to re-send the filter to the host. */
604 gdev->event_filter_host = U32_MAX;
605 if (session_termination)
/* Undo both the tracker update and the session-local change. */
608 vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
609 session->event_filter);
610 session->event_filter = previous;
614 mutex_unlock(&gdev->session_mutex);
615 vbg_req_free(req, sizeof(*req));
621 * Init and termination worker for set guest capabilities to zero on the host.
622 * Return: 0 or negative errno value.
623 * @gdev: The Guest extension device.
625 static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
627 struct vmmdev_mask *req;
630 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
/* not_mask = all bits: clear every capability on the host. */
634 req->not_mask = U32_MAX;
636 rc = vbg_req_perform(gdev, req);
638 vbg_err("%s error, rc: %d\n", __func__, rc);
640 vbg_req_free(req, sizeof(*req));
641 return vbg_status_code_to_errno(rc);
645 * Sets the guest capabilities for a session. Takes the session spinlock.
646 * Return: 0 or negative errno value.
647 * @gdev: The Guest extension device.
648 * @session: The session.
649 * @or_mask: The capabilities to add.
650 * @not_mask: The capabilities to remove.
651 * @session_termination: Set if we're called by the session cleanup code.
652 * This tweaks the error handling so we perform
653 * proper session cleanup even if the host
/* Mirrors vbg_set_session_event_filter, but for guest capabilities. */
656 static int vbg_set_session_capabilities(struct vbg_dev *gdev,
657 struct vbg_session *session,
658 u32 or_mask, u32 not_mask,
659 bool session_termination)
661 struct vmmdev_mask *req;
662 u32 changed, previous;
665 /* Allocate a request buffer before taking the spinlock */
666 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
668 if (!session_termination)
670 /* Ignore allocation failure, we must do session cleanup. */
673 mutex_lock(&gdev->session_mutex);
675 /* Apply the changes to the session mask. */
676 previous = session->guest_caps;
677 session->guest_caps |= or_mask;
678 session->guest_caps &= ~not_mask;
680 /* If anything actually changed, update the global usage counters. */
681 changed = previous ^ session->guest_caps;
685 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
686 or_mask = gdev->guest_caps_tracker.mask;
/* Skip the host round-trip when nothing changed or no req buffer is available. */
688 if (gdev->guest_caps_host == or_mask || !req)
691 gdev->guest_caps_host = or_mask;
692 req->or_mask = or_mask;
693 req->not_mask = ~or_mask;
694 rc = vbg_req_perform(gdev, req);
696 ret = vbg_status_code_to_errno(rc);
698 /* Failed, roll back (unless it's session termination time). */
699 gdev->guest_caps_host = U32_MAX;
700 if (session_termination)
703 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
704 session->guest_caps);
705 session->guest_caps = previous;
709 mutex_unlock(&gdev->session_mutex);
710 vbg_req_free(req, sizeof(*req));
716 * vbg_query_host_version get the host feature mask and version information.
717 * Return: 0 or negative errno value.
718 * @gdev: The Guest extension device.
720 static int vbg_query_host_version(struct vbg_dev *gdev)
722 struct vmmdev_host_version *req;
725 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
729 rc = vbg_req_perform(gdev, req);
730 ret = vbg_status_code_to_errno(rc);
732 vbg_err("%s error: %d\n", __func__, rc);
/* Cache the version string and feature mask for later use / logging. */
736 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
737 req->major, req->minor, req->build, req->revision);
738 gdev->host_features = req->features;
740 vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
741 gdev->host_features);
/* This driver requires HGCM page-list support; refuse to run on older hosts. */
743 if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
744 vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
749 vbg_req_free(req, sizeof(*req));
754 * Initializes the VBoxGuest device extension when the
755 * device driver is loaded.
757 * The native code locates the VMMDev on the PCI bus and retrieve
758 * the MMIO and I/O port ranges, this function will take care of
759 * mapping the MMIO memory (if present). Upon successful return
760 * the native code should set up the interrupt handler.
762 * Return: 0 or negative errno value.
764 * @gdev: The Guest extension device.
765 * @fixed_events: Events that will be enabled upon init and no client
766 * will ever be allowed to mask.
768 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
772 gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
773 gdev->event_filter_host = U32_MAX; /* forces a report */
774 gdev->guest_caps_host = U32_MAX; /* forces a report */
776 init_waitqueue_head(&gdev->event_wq);
777 init_waitqueue_head(&gdev->hgcm_wq);
778 spin_lock_init(&gdev->event_spinlock);
779 mutex_init(&gdev->session_mutex);
780 mutex_init(&gdev->cancel_req_mutex);
781 timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
782 INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
/* Pre-allocate all long-lived VMMDev request buffers up front. */
784 gdev->mem_balloon.get_req =
785 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
786 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
787 gdev->mem_balloon.change_req =
788 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
789 VMMDEVREQ_CHANGE_MEMBALLOON);
791 vbg_req_alloc(sizeof(*(gdev->cancel_req)),
792 VMMDEVREQ_HGCM_CANCEL2);
793 gdev->ack_events_req =
794 vbg_req_alloc(sizeof(*gdev->ack_events_req),
795 VMMDEVREQ_ACKNOWLEDGE_EVENTS);
796 gdev->mouse_status_req =
797 vbg_req_alloc(sizeof(*gdev->mouse_status_req),
798 VMMDEVREQ_GET_MOUSE_STATUS);
/* Any single allocation failure aborts init (error path frees them all). */
800 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
801 !gdev->cancel_req || !gdev->ack_events_req ||
802 !gdev->mouse_status_req)
805 ret = vbg_query_host_version(gdev);
809 ret = vbg_report_guest_info(gdev);
811 vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
815 ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
817 vbg_err("vboxguest: Error setting fixed event filter: %d\n",
822 ret = vbg_reset_host_capabilities(gdev);
824 vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
829 ret = vbg_core_set_mouse_status(gdev, 0);
831 vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
835 /* These may fail without requiring the driver init to fail. */
836 vbg_guest_mappings_init(gdev);
837 vbg_heartbeat_init(gdev);
840 ret = vbg_report_driver_status(gdev, true);
842 vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
/* Error path: release every request buffer allocated above. */
847 vbg_req_free(gdev->mouse_status_req,
848 sizeof(*gdev->mouse_status_req));
849 vbg_req_free(gdev->ack_events_req,
850 sizeof(*gdev->ack_events_req));
851 vbg_req_free(gdev->cancel_req,
852 sizeof(*gdev->cancel_req));
853 vbg_req_free(gdev->mem_balloon.change_req,
854 sizeof(*gdev->mem_balloon.change_req));
855 vbg_req_free(gdev->mem_balloon.get_req,
856 sizeof(*gdev->mem_balloon.get_req));
861 * Call this on exit to clean-up vboxguest-core managed resources.
863 * The native code should call this before the driver is loaded,
864 * but don't call this on shutdown.
865 * @gdev: The Guest extension device.
867 void vbg_core_exit(struct vbg_dev *gdev)
869 vbg_heartbeat_exit(gdev);
870 vbg_guest_mappings_exit(gdev);
872 /* Clear the host flags (mouse status etc). */
873 vbg_reset_host_event_filter(gdev, 0);
874 vbg_reset_host_capabilities(gdev);
875 vbg_core_set_mouse_status(gdev, 0);
/* Free the long-lived request buffers allocated in vbg_core_init(). */
877 vbg_req_free(gdev->mouse_status_req,
878 sizeof(*gdev->mouse_status_req));
879 vbg_req_free(gdev->ack_events_req,
880 sizeof(*gdev->ack_events_req));
881 vbg_req_free(gdev->cancel_req,
882 sizeof(*gdev->cancel_req));
883 vbg_req_free(gdev->mem_balloon.change_req,
884 sizeof(*gdev->mem_balloon.change_req));
885 vbg_req_free(gdev->mem_balloon.get_req,
886 sizeof(*gdev->mem_balloon.get_req));
890 * Creates a VBoxGuest user session.
892 * vboxguest_linux.c calls this when userspace opens the char-device.
893 * Return: A pointer to the new session or an ERR_PTR on error.
894 * @gdev: The Guest extension device.
895 * @user: Set if this is a session for the vboxuser device.
897 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
899 struct vbg_session *session;
/* kzalloc zeroes the session, so filters/caps/client-ids start cleared. */
901 session = kzalloc(sizeof(*session), GFP_KERNEL);
903 return ERR_PTR(-ENOMEM);
905 session->gdev = gdev;
906 session->user_session = user;
912 * Closes a VBoxGuest session.
913 * @session: The session to close (and free).
915 void vbg_core_close_session(struct vbg_session *session)
917 struct vbg_dev *gdev = session->gdev;
/* Drop all capabilities and event-filter bits held by this session. */
920 vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
921 vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
/* Disconnect any HGCM clients the session still has open. */
923 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
924 if (!session->hgcm_client_ids[i])
927 vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
/* Validate that an ioctl header carries exactly the expected payload sizes. */
933 static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
936 if (hdr->size_in != (sizeof(*hdr) + in_size) ||
937 hdr->size_out != (sizeof(*hdr) + out_size))
/*
 * Handle VBG_IOCTL_DRIVER_VERSION_INFO: negotiate the ioctl interface
 * version between userspace and this driver.
 */
943 static int vbg_ioctl_driver_version_info(
944 struct vbg_ioctl_driver_version_info *info)
946 const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
947 u16 min_maj_version, req_maj_version;
949 if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
/* The major version lives in the top 16 bits of the version words. */
952 req_maj_version = info->u.in.req_version >> 16;
953 min_maj_version = info->u.in.min_version >> 16;
/* min must not exceed req, and both must share the same major version. */
955 if (info->u.in.min_version > info->u.in.req_version ||
956 min_maj_version != req_maj_version)
959 if (info->u.in.min_version <= VBG_IOC_VERSION &&
960 min_maj_version == vbg_maj_version) {
961 info->u.out.session_version = VBG_IOC_VERSION;
/* Incompatible: report mismatch via hdr.rc rather than an errno. */
963 info->u.out.session_version = U32_MAX;
964 info->hdr.rc = VERR_VERSION_MISMATCH;
967 info->u.out.driver_version = VBG_IOC_VERSION;
968 info->u.out.driver_revision = 0;
969 info->u.out.reserved1 = 0;
970 info->u.out.reserved2 = 0;
/*
 * Wait-queue condition: true when one of the requested events is pending
 * or the session has been asked to cancel its waiters.
 */
975 static bool vbg_wait_event_cond(struct vbg_dev *gdev,
976 struct vbg_session *session,
983 spin_lock_irqsave(&gdev->event_spinlock, flags);
985 events = gdev->pending_events & event_mask;
986 wakeup = events || session->cancel_waiters;
988 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
993 /* Must be called with the event_lock held */
/* Atomically claim (and clear) the pending events matching event_mask. */
994 static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
995 struct vbg_session *session,
998 u32 events = gdev->pending_events & event_mask;
1000 gdev->pending_events &= ~events;
/*
 * Handle VBG_IOCTL_WAIT_FOR_EVENTS: sleep until one of the requested
 * VMMDev events fires, the timeout expires, or the wait is cancelled.
 */
1004 static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
1005 struct vbg_session *session,
1006 struct vbg_ioctl_wait_for_events *wait)
1008 u32 timeout_ms = wait->u.in.timeout_ms;
1009 u32 event_mask = wait->u.in.events;
1010 unsigned long flags;
1014 if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
/* U32_MAX means "wait forever". */
1017 if (timeout_ms == U32_MAX)
1018 timeout = MAX_SCHEDULE_TIMEOUT;
1020 timeout = msecs_to_jiffies(timeout_ms);
1022 wait->u.out.events = 0;
/* Loop: another waiter may consume the events between wake-up and the
 * locked consume below, in which case we go back to sleep. */
1024 timeout = wait_event_interruptible_timeout(
1026 vbg_wait_event_cond(gdev, session, event_mask),
1029 spin_lock_irqsave(&gdev->event_spinlock, flags);
/* timeout < 0: interrupted by a signal; cancel_waiters: explicit cancel. */
1031 if (timeout < 0 || session->cancel_waiters) {
1033 } else if (timeout == 0) {
1036 wait->u.out.events =
1037 vbg_consume_events_locked(gdev, session, event_mask);
1040 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1043 * Someone else may have consumed the event(s) first, in
1044 * which case we go back to waiting.
1046 } while (ret == 0 && wait->u.out.events == 0);
/*
 * Handle VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS: flag the session so all
 * of its sleeping event-waiters abort, then wake the wait queue.
 */
1051 static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
1052 struct vbg_session *session,
1053 struct vbg_ioctl_hdr *hdr)
1055 unsigned long flags;
/* This ioctl carries no payload; the header alone must match. */
1057 if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
1060 spin_lock_irqsave(&gdev->event_spinlock, flags);
1061 session->cancel_waiters = true;
1062 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1064 wake_up(&gdev->event_wq);
1070 * Checks if the VMM request is allowed in the context of the given session.
1071 * Return: 0 or negative errno value.
1072 * @gdev: The Guest extension device.
1073 * @session: The calling session.
1074 * @req: The request.
/*
 * Policy: requests are whitelisted per type; some are restricted to trusted
 * (non-vboxuser) sessions, some depend on the request payload, and anything
 * not listed is denied outright.
 */
1076 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1077 const struct vmmdev_request_header *req)
1079 const struct vmmdev_guest_status *guest_status;
1080 bool trusted_apps_only;
1082 switch (req->request_type) {
1083 /* Trusted users apps only. */
1084 case VMMDEVREQ_QUERY_CREDENTIALS:
1085 case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
1086 case VMMDEVREQ_REGISTER_SHARED_MODULE:
1087 case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
1088 case VMMDEVREQ_WRITE_COREDUMP:
1089 case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
1090 case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
1091 case VMMDEVREQ_CHECK_SHARED_MODULES:
1092 case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
1093 case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
1094 case VMMDEVREQ_REPORT_GUEST_STATS:
1095 case VMMDEVREQ_REPORT_GUEST_USER_STATE:
1096 case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
1097 trusted_apps_only = true;
/* Anyone (any session) may issue these. */
1101 case VMMDEVREQ_GET_MOUSE_STATUS:
1102 case VMMDEVREQ_SET_MOUSE_STATUS:
1103 case VMMDEVREQ_SET_POINTER_SHAPE:
1104 case VMMDEVREQ_GET_HOST_VERSION:
1105 case VMMDEVREQ_IDLE:
1106 case VMMDEVREQ_GET_HOST_TIME:
1107 case VMMDEVREQ_SET_POWER_STATUS:
1108 case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
1109 case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
1110 case VMMDEVREQ_REPORT_GUEST_STATUS:
1111 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
1112 case VMMDEVREQ_VIDEMODE_SUPPORTED:
1113 case VMMDEVREQ_GET_HEIGHT_REDUCTION:
1114 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
1115 case VMMDEVREQ_VIDEMODE_SUPPORTED2:
1116 case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
1117 case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
1118 case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
1119 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
1120 case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
1121 case VMMDEVREQ_GET_VRDPCHANGE_REQ:
1122 case VMMDEVREQ_LOG_STRING:
1123 case VMMDEVREQ_GET_SESSION_ID:
1124 trusted_apps_only = false;
1127 /* Depends on the request parameters... */
1128 case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
1129 guest_status = (const struct vmmdev_guest_status *)req;
1130 switch (guest_status->facility) {
/* Userspace must not report on behalf of the driver itself. */
1131 case VBOXGUEST_FACILITY_TYPE_ALL:
1132 case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
1133 vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
1134 guest_status->facility);
1136 case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
1137 trusted_apps_only = true;
1139 case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
1140 case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
1141 case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
1143 trusted_apps_only = false;
1148 /* Anything else is not allowed. */
1150 vbg_err("Denying userspace vmm call type %#08x\n",
/* Trusted-only requests are rejected for vboxuser-device sessions. */
1155 if (trusted_apps_only && session->user_session) {
1156 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
/*
 * Handle VBG_IOCTL_VMMDEV_REQUEST: pass a raw VMMDev request from userspace
 * through to the host, after size and permission validation.
 */
1164 static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
1165 struct vbg_session *session, void *data)
1167 struct vbg_ioctl_hdr *hdr = data;
/* The request buffer is used for both directions, so sizes must match. */
1170 if (hdr->size_in != hdr->size_out)
1173 if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
1176 if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
1179 ret = vbg_req_allowed(gdev, session, data);
1183 vbg_req_perform(gdev, data);
/* Async HGCM execution must never leak out of a synchronous VMM request. */
1184 WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
/*
 * Handle VBG_IOCTL_HGCM_CONNECT: connect the session to an HGCM service
 * and record the resulting client id in the session's id table.
 */
1189 static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1190 struct vbg_session *session,
1191 struct vbg_ioctl_hgcm_connect *conn)
1196 if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
1199 /* Find a free place in the sessions clients array and claim it */
/* U32_MAX acts as a "slot reserved, connect in flight" placeholder. */
1200 mutex_lock(&gdev->session_mutex);
1201 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1202 if (!session->hgcm_client_ids[i]) {
1203 session->hgcm_client_ids[i] = U32_MAX;
1207 mutex_unlock(&gdev->session_mutex);
1209 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1212 ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
/* Commit the real client id on success, release the slot on failure. */
1215 mutex_lock(&gdev->session_mutex);
1216 if (ret == 0 && conn->hdr.rc >= 0) {
1217 conn->u.out.client_id = client_id;
1218 session->hgcm_client_ids[i] = client_id;
1220 conn->u.out.client_id = 0;
1221 session->hgcm_client_ids[i] = 0;
1223 mutex_unlock(&gdev->session_mutex);
/*
 * Handle VBG_IOCTL_HGCM_DISCONNECT: drop one of the session's HGCM client
 * connections, releasing its slot in the session's id table.
 */
1228 static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1229 struct vbg_session *session,
1230 struct vbg_ioctl_hgcm_disconnect *disconn)
1235 if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
/* 0 is "free slot" and U32_MAX is the reserved placeholder — neither is a
 * valid client id the caller may disconnect. */
1238 client_id = disconn->u.in.client_id;
1239 if (client_id == 0 || client_id == U32_MAX)
/* Reserve the slot (U32_MAX) while the disconnect is in flight. */
1242 mutex_lock(&gdev->session_mutex);
1243 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1244 if (session->hgcm_client_ids[i] == client_id) {
1245 session->hgcm_client_ids[i] = U32_MAX;
1249 mutex_unlock(&gdev->session_mutex);
1251 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1254 ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
/* Free the slot on success, otherwise restore the original client id. */
1256 mutex_lock(&gdev->session_mutex);
1257 if (ret == 0 && disconn->hdr.rc >= 0)
1258 session->hgcm_client_ids[i] = 0;
1260 session->hgcm_client_ids[i] = client_id;
1261 mutex_unlock(&gdev->session_mutex);
/* Whitelist of HGCM parameter types userspace is allowed to pass in. */
1266 static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
1269 case VMMDEV_HGCM_PARM_TYPE_32BIT:
1270 case VMMDEV_HGCM_PARM_TYPE_64BIT:
1271 case VMMDEV_HGCM_PARM_TYPE_LINADDR:
1272 case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
1273 case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
/*
 * Handle VBG_IOCTL_HGCM_CALL(_32): validate sizes, parameter types and the
 * client id, then forward the HGCM call to the host.
 * @f32bit: true when the caller is a 32-bit process (compat ioctl).
 */
1280 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1281 struct vbg_session *session, bool f32bit,
1282 struct vbg_ioctl_hgcm_call *call)
1288 if (call->hdr.size_in < sizeof(*call))
1291 if (call->hdr.size_in != call->hdr.size_out)
1294 if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
1297 client_id = call->client_id;
1298 if (client_id == 0 || client_id == U32_MAX)
/* The buffer must be big enough for the header plus all parameters;
 * 32-bit callers use the smaller parameter layout. */
1301 actual_size = sizeof(*call);
1303 actual_size += call->parm_count *
1304 sizeof(struct vmmdev_hgcm_function_parameter32);
1306 actual_size += call->parm_count *
1307 sizeof(struct vmmdev_hgcm_function_parameter);
1308 if (call->hdr.size_in < actual_size) {
1309 vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
1310 call->hdr.size_in, actual_size);
1313 call->hdr.size_out = actual_size;
1315 /* Validate parameter types */
1317 struct vmmdev_hgcm_function_parameter32 *parm =
1318 VBG_IOCTL_HGCM_CALL_PARMS32(call);
1320 for (i = 0; i < call->parm_count; i++)
1321 if (!vbg_param_valid(parm[i].type))
1324 struct vmmdev_hgcm_function_parameter *parm =
1325 VBG_IOCTL_HGCM_CALL_PARMS(call);
1327 for (i = 0; i < call->parm_count; i++)
1328 if (!vbg_param_valid(parm[i].type))
1333 * Validate the client id.
1335 mutex_lock(&gdev->session_mutex);
1336 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
1337 if (session->hgcm_client_ids[i] == client_id)
1339 mutex_unlock(&gdev->session_mutex);
1340 if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
1341 vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
1346 if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1347 ret = vbg_hgcm_call32(gdev, client_id,
1348 call->function, call->timeout_ms,
1349 VBG_IOCTL_HGCM_CALL_PARMS32(call),
1350 call->parm_count, &call->hdr.rc)
1352 ret = vbg_hgcm_call(gdev, client_id,
1353 call->function, call->timeout_ms,
1354 VBG_IOCTL_HGCM_CALL_PARMS(call),
1355 call->parm_count, &call->hdr.rc);
1357 if (ret == -E2BIG) {
1358 /* E2BIG needs to be reported through the hdr.rc field. */
1359 call->hdr.rc = VERR_OUT_OF_RANGE;
/* -EINTR/-ETIMEDOUT are expected interruptions, not worth logging. */
1363 if (ret && ret != -EINTR && ret != -ETIMEDOUT)
1364 vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
/*
 * VBG_IOCTL_LOG handler: forward a log message from user-space into the
 * kernel log via vbg_info().
 */
1369 static int vbg_ioctl_log(struct vbg_ioctl_log *log)
/* Output must be header-only; the input payload is the message text. */
1371 if (log->hdr.size_out != sizeof(log->hdr))
/* %.*s bounds the print to the payload length (size_in minus header),
 * so no NUL terminator is required from user-space. */
1374 vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
/*
 * VBG_IOCTL_CHANGE_FILTER_MASK handler: adjust this session's VMMDEV event
 * filter.  or_mask sets bits, not_mask clears them; both are range-checked
 * against VMMDEV_EVENT_VALID_EVENT_MASK before being applied.
 */
1380 static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
1381 struct vbg_session *session,
1382 struct vbg_ioctl_change_filter *filter)
1384 u32 or_mask, not_mask;
/* Validate header: u.in on input, no output payload. */
1386 if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
1389 or_mask = filter->u.in.or_mask;
1390 not_mask = filter->u.in.not_mask;
/* Reject any bit outside the set of defined VMMDEV events. */
1392 if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1395 return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
/*
 * VBG_IOCTL_CHANGE_GUEST_CAPABILITIES handler: adjust the guest
 * capabilities advertised by this session (or_mask sets, not_mask clears),
 * then report back both the session's and the host-visible global caps.
 */
1399 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
1400 struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
1402 u32 or_mask, not_mask;
/* This ioctl has both an input and an output payload. */
1405 if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
1408 or_mask = caps->u.in.or_mask;
1409 not_mask = caps->u.in.not_mask;
/* Reject bits outside the defined capability mask. */
1411 if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
1414 ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
/* Return the resulting per-session and global capability sets. */
1419 caps->u.out.session_caps = session->guest_caps;
1420 caps->u.out.global_caps = gdev->guest_caps_host;
/*
 * VBG_IOCTL_CHECK_BALLOON handler: report the current memory-balloon size
 * (in chunks) to user-space.
 */
1425 static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1426 struct vbg_ioctl_check_balloon *balloon_info)
/* Output-only ioctl: no input payload expected. */
1428 if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
1431 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
1433 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
1434 * events entirely in the kernel, see vbg_core_isr().
1436 balloon_info->u.out.handle_in_r3 = false;
/*
 * VBG_IOCTL_WRITE_CORE_DUMP handler: ask the host (via a
 * VMMDEVREQ_WRITE_COREDUMP request) to write a core dump of the guest.
 * The VBox status of the request is returned in dump->hdr.rc.
 */
1441 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1442 struct vbg_ioctl_write_coredump *dump)
1444 struct vmmdev_write_core_dump *req;
/* Input-only ioctl: u.in carries the flags, no output payload. */
1446 if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1449 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
1453 req->flags = dump->u.in.flags;
1454 dump->hdr.rc = vbg_req_perform(gdev, req);
/* Request buffers from vbg_req_alloc() must be freed with their size. */
1456 vbg_req_free(req, sizeof(*req));
1461 * Common IOCtl for user to kernel communication.
1462 * Return: 0 or negative errno value.
1463 * @session: The client session.
1464 * @req: The requested function.
1465 * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
1467 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
/* Strip the size bits so variable-sized requests match their _IOC(0) form. */
1469 unsigned int req_no_size = req & ~IOCSIZE_MASK;
1470 struct vbg_dev *gdev = session->gdev;
1471 struct vbg_ioctl_hdr *hdr = data;
1472 bool f32bit = false;
/* Default the reply header: success, and echo size_in as size_out. */
1474 hdr->rc = VINF_SUCCESS;
1476 hdr->size_out = hdr->size_in;
1479 * hdr->version and hdr->size_in / hdr->size_out minimum size are
1480 * already checked by vbg_misc_device_ioctl().
1483 /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
1484 if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
1485 req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
1486 req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
1487 return vbg_ioctl_vmmrequest(gdev, session, data);
/* Everything below expects the default header type. */
1489 if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
1492 /* Fixed size requests. */
1494 case VBG_IOCTL_DRIVER_VERSION_INFO:
1495 return vbg_ioctl_driver_version_info(data);
1496 case VBG_IOCTL_HGCM_CONNECT:
1497 return vbg_ioctl_hgcm_connect(gdev, session, data);
1498 case VBG_IOCTL_HGCM_DISCONNECT:
1499 return vbg_ioctl_hgcm_disconnect(gdev, session, data);
1500 case VBG_IOCTL_WAIT_FOR_EVENTS:
1501 return vbg_ioctl_wait_for_events(gdev, session, data);
1502 case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
1503 return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
1504 case VBG_IOCTL_CHANGE_FILTER_MASK:
1505 return vbg_ioctl_change_filter_mask(gdev, session, data);
1506 case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
1507 return vbg_ioctl_change_guest_capabilities(gdev, session, data);
1508 case VBG_IOCTL_CHECK_BALLOON:
1509 return vbg_ioctl_check_balloon(gdev, data);
1510 case VBG_IOCTL_WRITE_CORE_DUMP:
1511 return vbg_ioctl_write_core_dump(gdev, data);
1514 /* Variable sized requests. */
1515 switch (req_no_size) {
1516 #ifdef CONFIG_COMPAT
/* 32-bit compat call: falls through to the HGCM_CALL case with
 * f32bit set (the flag assignment is between these cases). */
1517 case VBG_IOCTL_HGCM_CALL_32(0):
1521 case VBG_IOCTL_HGCM_CALL(0):
1522 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1523 case VBG_IOCTL_LOG(0):
1524 case VBG_IOCTL_LOG_ALT(0):
1525 return vbg_ioctl_log(data);
/* Unknown request number: log it for debugging. */
1528 vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
1533 * Report guest supported mouse-features to the host.
1535 * Return: 0 or negative errno value.
1536 * @gdev: The Guest extension device.
1537 * @features: The set of features to report to the host.
1539 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1541 struct vmmdev_mouse_status *req;
1544 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
1548 req->mouse_features = features;
/* Only feature flags are reported here; the position fields are unused. */
1549 req->pointer_pos_x = 0;
1550 req->pointer_pos_y = 0;
1552 rc = vbg_req_perform(gdev, req);
1554 vbg_err("%s error, rc: %d\n", __func__, rc);
/* Free the request buffer with the same size it was allocated with. */
1556 vbg_req_free(req, sizeof(*req));
/* Translate the VBox status code into a negative errno for callers. */
1557 return vbg_status_code_to_errno(rc);
1560 /** Core interrupt service routine. */
1561 irqreturn_t vbg_core_isr(int irq, void *dev_id)
1563 struct vbg_dev *gdev = dev_id;
1564 struct vmmdev_events *req = gdev->ack_events_req;
1565 bool mouse_position_changed = false;
1566 unsigned long flags;
1570 if (!gdev->mmio->V.V1_04.have_events)
1573 /* Get and acknowledge events. */
1574 req->header.rc = VERR_INTERNAL_ERROR;
1576 rc = vbg_req_perform(gdev, req);
1578 vbg_err("Error performing events req, rc: %d\n", rc);
1582 events = req->events;
1584 if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
1585 mouse_position_changed = true;
1586 events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1589 if (events & VMMDEV_EVENT_HGCM) {
1590 wake_up(&gdev->hgcm_wq);
1591 events &= ~VMMDEV_EVENT_HGCM;
1594 if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
1595 schedule_work(&gdev->mem_balloon.work);
1596 events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1600 spin_lock_irqsave(&gdev->event_spinlock, flags);
1601 gdev->pending_events |= events;
1602 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1604 wake_up(&gdev->event_wq);
1607 if (mouse_position_changed)
1608 vbg_linux_mouse_event(gdev);