// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "trace.h"

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

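/*
 * Set up the guest's shared_info page at the given GFN and write the Xen
 * wall clock into it. The wall clock lives at offset 0x900 in the 32-bit
 * layout and 0xc00 in the 64-bit layout, hence the long_mode check below;
 * the BUILD_BUG_ON()s pin those ABI offsets at compile time.
 */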
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
        gpa_t gpa = gfn_to_gpa(gfn);
        int wc_ofs, sec_hi_ofs;
        int ret = 0;
        int idx = srcu_read_lock(&kvm->srcu);

        if (kvm_is_error_hva(gfn_to_hva(kvm, gfn))) {
                ret = -EFAULT;
                goto out;
        }
        kvm->arch.xen.shinfo_gfn = gfn;

        /* Paranoia checks on the 32-bit struct layout */
        BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
        BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

        /* 32-bit location by default */
        wc_ofs = offsetof(struct compat_shared_info, wc);
        sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi);

#ifdef CONFIG_X86_64
        /* Paranoia checks on the 64-bit struct layout */
        BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
        BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

        if (kvm->arch.xen.long_mode) {
                wc_ofs = offsetof(struct shared_info, wc);
                sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi);
        }
#endif

        kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs);
        kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
        srcu_read_unlock(&kvm->srcu, idx);
        return ret;
}

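/*
 * Fold the time since the last transition into the runstate accounting.
 * Time spent waiting on the host runqueue while nominally "running" is
 * re-credited to RUNSTATE_runnable, which is what a Xen guest reads as
 * stolen time. Illustrative example (not from the code): 10ms of
 * kvmclock time spent in RUNSTATE_running with 2ms of run_delay accrued
 * is accounted as 8ms running + 2ms runnable.
 */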
static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
        struct kvm_vcpu_xen *vx = &v->arch.xen;
        u64 now = get_kvmclock_ns(v->kvm);
        u64 delta_ns = now - vx->runstate_entry_time;
        u64 run_delay = current->sched_info.run_delay;

        if (unlikely(!vx->runstate_entry_time))
                vx->current_runstate = RUNSTATE_offline;

        /*
         * Time waiting for the scheduler isn't "stolen" if the
         * vCPU wasn't running anyway.
         */
        if (vx->current_runstate == RUNSTATE_running) {
                u64 steal_ns = run_delay - vx->last_steal;

                delta_ns -= steal_ns;

                vx->runstate_times[RUNSTATE_runnable] += steal_ns;
        }
        vx->last_steal = run_delay;

        vx->runstate_times[vx->current_runstate] += delta_ns;
        vx->current_runstate = state;
        vx->runstate_entry_time = now;
}

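/*
 * Mirror the runstate into the guest-mapped vcpu_runstate_info. The two
 * ABI layouts asserted below differ only in uint64_t alignment:
 *
 *      compat (32-bit): state @0x00, state_entry_time @0x04, time[4] @0x0c
 *      64-bit:          state @0x00, state_entry_time @0x08, time[4] @0x10
 *
 * The XEN_RUNSTATE_UPDATE bit in state_entry_time is used like a seqlock
 * marker: it is set (with a write barrier) before the times are updated
 * and cleared afterwards, so a guest that sees it set knows time[] may be
 * torn and should re-read.
 */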
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
        struct kvm_vcpu_xen *vx = &v->arch.xen;
        struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
        struct kvm_memslots *slots = kvm_memslots(v->kvm);
        bool atomic = (state == RUNSTATE_runnable);
        uint64_t state_entry_time;
        int __user *user_state;
        uint64_t __user *user_times;

        kvm_xen_update_runstate(v, state);

        if (!vx->runstate_set)
                return;

        if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
            kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
                return;

        /* We made sure it fits in a single page */
        BUG_ON(!ghc->memslot);

        if (atomic)
                pagefault_disable();

        /*
         * The only difference between 32-bit and 64-bit versions of the
         * runstate struct is the alignment of uint64_t in 32-bit, which
         * means that the 64-bit version has an additional 4 bytes of
         * padding after the first field 'state'.
         *
         * So we use 'int __user *user_state' to point to the state field,
         * and 'uint64_t __user *user_times' for runstate_entry_time. So
         * the actual array of time[] in each state starts at user_times[1].
         */
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
        BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
        user_state = (int __user *)ghc->hva;

        BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);

        user_times = (uint64_t __user *)(ghc->hva +
                                         offsetof(struct compat_vcpu_runstate_info,
                                                  state_entry_time));
#ifdef CONFIG_X86_64
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
                     offsetof(struct compat_vcpu_runstate_info, time) + 4);

        if (v->kvm->arch.xen.long_mode)
                user_times = (uint64_t __user *)(ghc->hva +
                                                 offsetof(struct vcpu_runstate_info,
                                                          state_entry_time));
#endif
        /*
         * First write the updated state_entry_time at the appropriate
         * location determined by 'offset'.
         */
        state_entry_time = vx->runstate_entry_time;
        state_entry_time |= XEN_RUNSTATE_UPDATE;

        BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) !=
                     sizeof(state_entry_time));
        BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
                     sizeof(state_entry_time));

        if (__put_user(state_entry_time, user_times))
                goto out;
        smp_wmb();

        /*
         * Next, write the new runstate. This is in the *same* place
         * for 32-bit and 64-bit guests, asserted here for paranoia.
         */
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
                     offsetof(struct compat_vcpu_runstate_info, state));
        BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) !=
                     sizeof(vx->current_runstate));
        BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
                     sizeof(vx->current_runstate));

        if (__put_user(vx->current_runstate, user_state))
                goto out;

        /*
         * Write the actual runstate times immediately after the
         * runstate_entry_time.
         */
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
        BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
        BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
                     sizeof(((struct compat_vcpu_runstate_info *)0)->time));
        BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
                     sizeof(vx->runstate_times));

        if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
                goto out;
        smp_wmb();

        /*
         * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
         * runstate_entry_time field.
         */
        state_entry_time &= ~XEN_RUNSTATE_UPDATE;
        __put_user(state_entry_time, user_times);
        smp_wmb();

 out:
        mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

        if (atomic)
                pagefault_enable();
}

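/*
 * Check whether the vCPU has an event channel upcall pending, i.e.
 * whether vcpu_info->evtchn_upcall_pending is set. This runs on hot
 * paths (including just before blocking in HLT), so it peeks at the
 * mapped page with __get_user() while the HVA cache is valid, and only
 * takes the sleeping slow path from contexts where that is safe.
 */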
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
        int err;
        u8 rc = 0;

        /*
         * If the global upcall vector (HVMIRQ_callback_vector) is set and
         * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
         */
        struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
        struct kvm_memslots *slots = kvm_memslots(v->kvm);
        unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);

        /* No need for compat handling here */
        BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
                     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
        BUILD_BUG_ON(sizeof(rc) !=
                     sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
        BUILD_BUG_ON(sizeof(rc) !=
                     sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));

        /*
         * For efficiency, this mirrors the checks for using the valid
         * cache in kvm_read_guest_offset_cached(), but just uses
         * __get_user() instead. And falls back to the slow path.
         */
        if (likely(slots->generation == ghc->generation &&
                   !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
                /* Fast path */
                pagefault_disable();
                err = __get_user(rc, (u8 __user *)ghc->hva + offset);
                pagefault_enable();
                if (!err)
                        return rc;
        }

        /* Slow path */

        /*
         * This function gets called from kvm_vcpu_block() after setting the
         * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
         * from a HLT. So we really mustn't sleep. If the page ended up absent
         * at that point, just return 1 in order to trigger an immediate wake,
         * and we'll end up getting called again from a context where we *can*
         * fault in the page and wait for it.
         */
        if (in_atomic() || !task_is_running(current))
                return 1;

        kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
                                     sizeof(rc));

        return rc;
}

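/*
 * VM-wide Xen attributes, reached via the KVM_XEN_HVM_SET_ATTR and
 * KVM_XEN_HVM_GET_ATTR ioctls. Accesses are serialized against each
 * other (and against kvm_xen_hvm_config()) by kvm->lock.
 */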
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
        int r = -ENOENT;

        mutex_lock(&kvm->lock);

        switch (data->type) {
        case KVM_XEN_ATTR_TYPE_LONG_MODE:
                if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
                        r = -EINVAL;
                } else {
                        kvm->arch.xen.long_mode = !!data->u.long_mode;
                        r = 0;
                }
                break;

        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
                if (data->u.shared_info.gfn == GPA_INVALID) {
                        kvm->arch.xen.shinfo_gfn = GPA_INVALID;
                        r = 0;
                        break;
                }
                r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
                break;

        case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                if (data->u.vector && data->u.vector < 0x10)
                        r = -EINVAL;
                else {
                        kvm->arch.xen.upcall_vector = data->u.vector;
                        r = 0;
                }
                break;

        default:
                break;
        }

        mutex_unlock(&kvm->lock);
        return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
        int r = -ENOENT;

        mutex_lock(&kvm->lock);

        switch (data->type) {
        case KVM_XEN_ATTR_TYPE_LONG_MODE:
                data->u.long_mode = kvm->arch.xen.long_mode;
                r = 0;
                break;

        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
                data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn;
                r = 0;
                break;

        case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                data->u.vector = kvm->arch.xen.upcall_vector;
                r = 0;
                break;

        default:
                break;
        }

        mutex_unlock(&kvm->lock);
        return r;
}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
        int idx, r = -ENOENT;

        mutex_lock(&vcpu->kvm->lock);
        idx = srcu_read_lock(&vcpu->kvm->srcu);

        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
                /* No compat necessary here. */
                BUILD_BUG_ON(sizeof(struct vcpu_info) !=
                             sizeof(struct compat_vcpu_info));
                BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
                             offsetof(struct compat_vcpu_info, time));

                if (data->u.gpa == GPA_INVALID) {
                        vcpu->arch.xen.vcpu_info_set = false;
                        r = 0;
                        break;
                }

                /* It must fit within a single page */
                if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
                        r = -EINVAL;
                        break;
                }

                r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.vcpu_info_cache,
                                              data->u.gpa,
                                              sizeof(struct vcpu_info));
                if (!r) {
                        vcpu->arch.xen.vcpu_info_set = true;
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                }
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
                if (data->u.gpa == GPA_INVALID) {
                        vcpu->arch.xen.vcpu_time_info_set = false;
                        r = 0;
                        break;
                }

                /* It must fit within a single page */
                if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
                        r = -EINVAL;
                        break;
                }

                r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.vcpu_time_info_cache,
                                              data->u.gpa,
                                              sizeof(struct pvclock_vcpu_time_info));
                if (!r) {
                        vcpu->arch.xen.vcpu_time_info_set = true;
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                }
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.gpa == GPA_INVALID) {
                        vcpu->arch.xen.runstate_set = false;
                        r = 0;
                        break;
                }

                /* It must fit within a single page */
                if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
                        r = -EINVAL;
                        break;
                }

                r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.runstate_cache,
                                              data->u.gpa,
                                              sizeof(struct vcpu_runstate_info));
                if (!r)
                        vcpu->arch.xen.runstate_set = true;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.runstate.state > RUNSTATE_offline) {
                        r = -EINVAL;
                        break;
                }

                kvm_xen_update_runstate(vcpu, data->u.runstate.state);
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.runstate.state > RUNSTATE_offline) {
                        r = -EINVAL;
                        break;
                }
                /* The times must add up to the given state_entry_time */
                if (data->u.runstate.state_entry_time !=
                    (data->u.runstate.time_running +
                     data->u.runstate.time_runnable +
                     data->u.runstate.time_blocked +
                     data->u.runstate.time_offline)) {
                        r = -EINVAL;
                        break;
                }
                if (get_kvmclock_ns(vcpu->kvm) <
                    data->u.runstate.state_entry_time) {
                        r = -EINVAL;
                        break;
                }

                vcpu->arch.xen.current_runstate = data->u.runstate.state;
                vcpu->arch.xen.runstate_entry_time =
                        data->u.runstate.state_entry_time;
                vcpu->arch.xen.runstate_times[RUNSTATE_running] =
                        data->u.runstate.time_running;
                vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
                        data->u.runstate.time_runnable;
                vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
                        data->u.runstate.time_blocked;
                vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
                        data->u.runstate.time_offline;
                vcpu->arch.xen.last_steal = current->sched_info.run_delay;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (data->u.runstate.state > RUNSTATE_offline &&
                    data->u.runstate.state != (u64)-1) {
                        r = -EINVAL;
                        break;
                }
                /* The adjustment must add up */
                if (data->u.runstate.state_entry_time !=
                    (data->u.runstate.time_running +
                     data->u.runstate.time_runnable +
                     data->u.runstate.time_blocked +
                     data->u.runstate.time_offline)) {
                        r = -EINVAL;
                        break;
                }

                if (get_kvmclock_ns(vcpu->kvm) <
                    (vcpu->arch.xen.runstate_entry_time +
                     data->u.runstate.state_entry_time)) {
                        r = -EINVAL;
                        break;
                }

                vcpu->arch.xen.runstate_entry_time +=
                        data->u.runstate.state_entry_time;
                vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
                        data->u.runstate.time_running;
                vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
                        data->u.runstate.time_runnable;
                vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
                        data->u.runstate.time_blocked;
                vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
                        data->u.runstate.time_offline;
                r = 0;
                if (data->u.runstate.state <= RUNSTATE_offline)
                        kvm_xen_update_runstate(vcpu, data->u.runstate.state);
                break;

        default:
                break;
        }

        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        mutex_unlock(&vcpu->kvm->lock);
        return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
        int r = -ENOENT;

        mutex_lock(&vcpu->kvm->lock);

        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
                if (vcpu->arch.xen.vcpu_info_set)
                        data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
                else
                        data->u.gpa = GPA_INVALID;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
                if (vcpu->arch.xen.vcpu_time_info_set)
                        data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
                else
                        data->u.gpa = GPA_INVALID;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                if (vcpu->arch.xen.runstate_set) {
                        data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
                        r = 0;
                }
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                data->u.runstate.state = vcpu->arch.xen.current_runstate;
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
                if (!sched_info_on()) {
                        r = -EOPNOTSUPP;
                        break;
                }
                data->u.runstate.state = vcpu->arch.xen.current_runstate;
                data->u.runstate.state_entry_time =
                        vcpu->arch.xen.runstate_entry_time;
                data->u.runstate.time_running =
                        vcpu->arch.xen.runstate_times[RUNSTATE_running];
                data->u.runstate.time_runnable =
                        vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
                data->u.runstate.time_blocked =
                        vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
                data->u.runstate.time_offline =
                        vcpu->arch.xen.runstate_times[RUNSTATE_offline];
                r = 0;
                break;

        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
                r = -EINVAL;
                break;

        default:
                break;
        }

        mutex_unlock(&vcpu->kvm->lock);
        return r;
}

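/*
 * Handler for writes to the hypercall page MSR. When KVM intercepts
 * hypercalls itself, each of the PAGE_SIZE/32 == 128 32-byte slots in
 * the generated page looks like this (slot i dispatches hypercall i):
 *
 *      b8 ii ii ii ii          mov     $i, %eax        (bytes 0-4)
 *      0f 01 c1 / 0f 01 d9     vmcall / vmmcall        (bytes 5-7, vendor-patched)
 *      c3                      ret                     (byte 8)
 *      cc cc cc ...            int3 padding            (bytes 9-31)
 */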
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
        struct kvm *kvm = vcpu->kvm;
        u32 page_num = data & ~PAGE_MASK;
        u64 page_addr = data & PAGE_MASK;
        bool lm = is_long_mode(vcpu);

        /* Latch long_mode for shared_info pages etc. */
        vcpu->kvm->arch.xen.long_mode = lm;

        /*
         * If Xen hypercall intercept is enabled, fill the hypercall
         * page with VMCALL/VMMCALL instructions since that's what
         * we catch. Else the VMM has provided the hypercall pages
         * with instructions of its own choosing, so use those.
         */
        if (kvm_xen_hypercall_enabled(kvm)) {
                u8 instructions[32];
                int i;

                if (page_num)
                        return 1;

                /* mov imm32, %eax */
                instructions[0] = 0xb8;

                /* vmcall / vmmcall */
                kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);

                /* ret */
                instructions[8] = 0xc3;

                /* int3 to pad */
                memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

                for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
                        *(u32 *)&instructions[1] = i;
                        if (kvm_vcpu_write_guest(vcpu,
                                                 page_addr + (i * sizeof(instructions)),
                                                 instructions, sizeof(instructions)))
                                return 1;
                }
        } else {
                /*
                 * Note, truncation is a non-issue as 'lm' is guaranteed to be
                 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
                 */
                hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
                                     : kvm->arch.xen_hvm_config.blob_addr_32;
                u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
                                  : kvm->arch.xen_hvm_config.blob_size_32;
                u8 *page;

                if (page_num >= blob_size)
                        return 1;

                blob_addr += page_num * PAGE_SIZE;

                page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
                        kfree(page);
                        return 1;
                }
                /* Don't leak the page on the success path either */
                kfree(page);
        }
        return 0;
}

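/*
 * The kvm_xen_enabled static key (see the DEFINE at the top of this
 * file) lets the Xen MSR/hypercall checks on hot paths compile down to
 * a patched branch. It is incremented when a VM first configures the
 * Xen MSR and dropped when the MSR is cleared or the VM is destroyed;
 * the HZ-deferred decrement avoids branch-patching churn when VMs are
 * torn down and recreated in quick succession.
 */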
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
        if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
                return -EINVAL;

        /*
         * With hypercall interception the kernel generates its own
         * hypercall page so it must not be provided.
         */
        if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
            (xhc->blob_addr_32 || xhc->blob_addr_64 ||
             xhc->blob_size_32 || xhc->blob_size_64))
                return -EINVAL;

        mutex_lock(&kvm->lock);

        if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
                static_branch_inc(&kvm_xen_enabled.key);
        else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
                static_branch_slow_dec_deferred(&kvm_xen_enabled);

        memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

        mutex_unlock(&kvm->lock);
        return 0;
}

void kvm_xen_init_vm(struct kvm *kvm)
{
        kvm->arch.xen.shinfo_gfn = GPA_INVALID;
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
        if (kvm->arch.xen_hvm_config.msr)
                static_branch_slow_dec_deferred(&kvm_xen_enabled);
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        kvm_rax_write(vcpu, result);
        return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
                return 1;

        return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

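/*
 * Xen hypercalls are not handled in the kernel; every one is bounced
 * out to userspace as a KVM_EXIT_XEN / KVM_EXIT_XEN_HCALL exit. The VMM
 * writes its result into run->xen.u.hcall.result and re-enters the
 * vCPU, at which point kvm_xen_hypercall_complete_userspace() above
 * stores the result in RAX and skips the hypercall instruction,
 * provided RIP still points at that instruction.
 */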
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
        bool longmode;
        u64 input, params[6];

        input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

        /* Hyper-V hypercalls get bit 31 set in EAX */
        if ((input & 0x80000000) &&
            kvm_hv_hypercall_enabled(vcpu))
                return kvm_hv_hypercall(vcpu);

        longmode = is_64_bit_hypercall(vcpu);
        if (!longmode) {
                params[0] = (u32)kvm_rbx_read(vcpu);
                params[1] = (u32)kvm_rcx_read(vcpu);
                params[2] = (u32)kvm_rdx_read(vcpu);
                params[3] = (u32)kvm_rsi_read(vcpu);
                params[4] = (u32)kvm_rdi_read(vcpu);
                params[5] = (u32)kvm_rbp_read(vcpu);
        }
#ifdef CONFIG_X86_64
        else {
                params[0] = (u64)kvm_rdi_read(vcpu);
                params[1] = (u64)kvm_rsi_read(vcpu);
                params[2] = (u64)kvm_rdx_read(vcpu);
                params[3] = (u64)kvm_r10_read(vcpu);
                params[4] = (u64)kvm_r8_read(vcpu);
                params[5] = (u64)kvm_r9_read(vcpu);
        }
#endif
        trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
                                params[3], params[4], params[5]);

        vcpu->run->exit_reason = KVM_EXIT_XEN;
        vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
        vcpu->run->xen.u.hcall.longmode = longmode;
        vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);
        vcpu->run->xen.u.hcall.input = input;
        vcpu->run->xen.u.hcall.params[0] = params[0];
        vcpu->run->xen.u.hcall.params[1] = params[1];
        vcpu->run->xen.u.hcall.params[2] = params[2];
        vcpu->run->xen.u.hcall.params[3] = params[3];
        vcpu->run->xen.u.hcall.params[4] = params[4];
        vcpu->run->xen.u.hcall.params[5] = params[5];
        vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
        vcpu->arch.complete_userspace_io =
                kvm_xen_hypercall_complete_userspace;

        return 0;
}