// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "lapic.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

#include <asm/xen/cpuid.h>

#include "cpuid.h"
#include "trace.h"

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

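/*
 * Map the shared_info page at the given GFN through the pfn cache and
 * publish the wallclock to it, the same way Xen does: bump wc_version
 * to an odd value, write the time fields, then bump it back to an even
 * value.
 */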
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == KVM_XEN_INVALID_GFN) {
		kvm_gpc_deactivate(gpc);
		goto out;
	}

	do {
		ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
		if (ret)
			goto out;

		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = kvm_get_wall_clock_epoch(kvm);

		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);

		if (gpc->valid)
			break;

		read_unlock_irq(&gpc->lock);
	} while (1);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, NSEC_PER_SEC);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;

		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		kvm_xen_set_evtchn(&e, vcpu->kvm);

		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
}

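/*
 * Timer callback, which runs in hrtimer (hard IRQ) context. Attempt
 * fast delivery of the timer event channel; if that would block, fall
 * back to setting timer_pending so that kvm_xen_inject_timer_irqs()
 * delivers the event from the vCPU thread instead.
 */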
static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
	struct kvm_xen_evtchn e;
	int rc;

	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;

	e.vcpu_id = vcpu->vcpu_id;
	e.vcpu_idx = vcpu->vcpu_idx;
	e.port = vcpu->arch.xen.timer_virq;
	e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

	rc = kvm_xen_set_evtchn_fast(&e, vcpu->kvm);
	if (rc != -EWOULDBLOCK) {
		vcpu->arch.xen.timer_expires = 0;
		return HRTIMER_NORESTART;
	}

	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
	/*
	 * Avoid races with the old timer firing. Checking timer_expires
	 * to avoid calling hrtimer_cancel() will only have false positives
	 * so is fine.
	 */
	if (vcpu->arch.xen.timer_expires)
		hrtimer_cancel(&vcpu->arch.xen.timer);

	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;

	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();
		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
}

static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
}

static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
}

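/*
 * Copy the vCPU's accumulated runstate data into the guest's
 * runstate_info region, coping with the 32-bit and 64-bit layouts and
 * with a region that the guest may have placed across a page boundary.
 */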
static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache;
	struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
	size_t user_len, user_len1, user_len2;
	struct vcpu_runstate_info rs;
	unsigned long flags;
	size_t times_ofs;
	uint8_t *update_bit = NULL;
	uint64_t entry_time;
	uint64_t *rs_times;
	int *rs_state;

	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'. Let's be really really
	 * paranoid about that, and match it with our internal data
	 * structures that we memcpy into it...
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
	/*
	 * The 64-bit structure has 4 bytes of padding before 'state_entry_time'
	 * so each subsequent field is shifted by 4, and it's 4 bytes longer.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
	BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4);
#endif
	/*
	 * The state field is in the same place at the start of both structs,
	 * and is the same size (int) as vx->current_runstate.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	/*
	 * The state_entry_time field is 64 bits in both versions, and the
	 * XEN_RUNSTATE_UPDATE flag is in the top bit, which given that x86
	 * is little-endian means that it's in the last *byte* of the word.
	 * That detail is important later.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80);

	/*
	 * The time array is four 64-bit quantities in both versions, matching
	 * the vx->runstate_times and immediately following state_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		user_len = sizeof(struct vcpu_runstate_info);
		times_ofs = offsetof(struct vcpu_runstate_info,
				     state_entry_time);
	} else {
		user_len = sizeof(struct compat_vcpu_runstate_info);
		times_ofs = offsetof(struct compat_vcpu_runstate_info,
				     state_entry_time);
	}

	/*
	 * There are basically no alignment constraints. The guest can set it
	 * up so it crosses from one page to the next, and at arbitrary byte
	 * alignment (and the 32-bit ABI doesn't align the 64-bit integers
	 * anyway, even if the overall struct had been 64-bit aligned).
	 */
	if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
		user_len2 = user_len - user_len1;
	} else {
		user_len1 = user_len;
		user_len2 = 0;
	}
	BUG_ON(user_len1 + user_len2 != user_len);

 retry:
	/*
	 * Attempt to obtain the GPC lock on *both* (if there are two)
	 * gfn_to_pfn caches that cover the region.
	 */
	if (atomic) {
		local_irq_save(flags);
		if (!read_trylock(&gpc1->lock)) {
			local_irq_restore(flags);
			return;
		}
	} else {
		read_lock_irqsave(&gpc1->lock, flags);
	}
	while (!kvm_gpc_check(gpc1, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (atomic)
			return;

		if (kvm_gpc_refresh(gpc1, user_len1))
			return;

		read_lock_irqsave(&gpc1->lock, flags);
	}

	if (likely(!user_len2)) {
		/*
		 * Set up three pointers directly to the runstate_info
		 * struct in the guest (via the GPC).
		 *
		 *  • @rs_state   → state field
		 *  • @rs_times   → state_entry_time field.
		 *  • @update_bit → last byte of state_entry_time, which
		 *                  contains the XEN_RUNSTATE_UPDATE bit.
		 */
		rs_state = gpc1->khva;
		rs_times = gpc1->khva + times_ofs;
		if (v->kvm->arch.xen.runstate_update_flag)
			update_bit = ((void *)(&rs_times[1])) - 1;
	} else {
		/*
		 * The guest's runstate_info is split across two pages and we
		 * need to hold and validate both GPCs simultaneously. We can
		 * declare a lock ordering GPC1 > GPC2 because nothing else
		 * takes them more than one at a time. Set a subclass on the
		 * gpc1 lock to make lockdep shut up about it.
		 */
		lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
		if (atomic) {
			if (!read_trylock(&gpc2->lock)) {
				read_unlock_irqrestore(&gpc1->lock, flags);
				return;
			}
		} else {
			read_lock(&gpc2->lock);
		}

		if (!kvm_gpc_check(gpc2, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);

			/* When invoked from kvm_sched_out() we cannot sleep */
			if (atomic)
				return;

			/*
			 * Use kvm_gpc_activate() here because if the runstate
			 * area was configured in 32-bit mode and only extends
			 * to the second page now because the guest changed to
			 * 64-bit mode, the second GPC won't have been set up.
			 */
			if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
					     user_len2))
				return;

			/*
			 * We dropped the lock on GPC1 so we have to go all the
			 * way back and revalidate that too.
			 */
			goto retry;
		}

		/*
		 * In this case, the runstate_info struct will be assembled on
		 * the kernel stack (compat or not as appropriate) and will
		 * be copied to GPC1/GPC2 with a dual memcpy. Set up the three
		 * rs pointers accordingly.
		 */
		rs_times = &rs.state_entry_time;

		/*
		 * The rs_state pointer points to the start of what we'll
		 * copy to the guest, which in the case of a compat guest
		 * is the 32-bit field that the compiler thinks is padding.
		 */
		rs_state = ((void *)rs_times) - times_ofs;

		/*
		 * The update_bit is still directly in the guest memory,
		 * via one GPC or the other.
		 */
		if (v->kvm->arch.xen.runstate_update_flag) {
			if (user_len1 >= times_ofs + sizeof(uint64_t))
				update_bit = gpc1->khva + times_ofs +
					sizeof(uint64_t) - 1;
			else
				update_bit = gpc2->khva + times_ofs +
					sizeof(uint64_t) - 1 - user_len1;
		}

		/*
		 * Don't leak kernel memory through the padding in the 64-bit
		 * version of the struct.
		 */
		memset(&rs, 0, offsetof(struct vcpu_runstate_info, state_entry_time));
	}

	/*
	 * First, set the XEN_RUNSTATE_UPDATE bit in the top bit of the
	 * state_entry_time field, directly in the guest. We need to set
	 * that (and write-barrier) before writing to the rest of the
	 * structure, and clear it last. Just as Xen does, we address the
	 * single *byte* in which it resides because it might be in a
	 * different cache line to the rest of the 64-bit word, due to
	 * the (lack of) alignment constraints.
	 */
	entry_time = vx->runstate_entry_time;
	if (update_bit) {
		entry_time |= XEN_RUNSTATE_UPDATE;
		*update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56;
		smp_wmb();
	}

	/*
	 * Now assemble the actual structure, either on our kernel stack
	 * or directly in the guest according to how the rs_state and
	 * rs_times pointers were set up above.
	 */
	*rs_state = vx->current_runstate;
	rs_times[0] = entry_time;
	memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times));

	/* For the split case, we have to then copy it to the guest. */
	if (user_len2) {
		memcpy(gpc1->khva, rs_state, user_len1);
		memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2);
	}
	smp_wmb();

	/* Finally, clear the XEN_RUNSTATE_UPDATE bit. */
	if (update_bit) {
		entry_time &= ~XEN_RUNSTATE_UPDATE;
		*update_bit = entry_time >> 56;
		smp_wmb();
	}

	if (user_len2)
		read_unlock(&gpc2->lock);

	read_unlock_irqrestore(&gpc1->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
	if (user_len2)
		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
}

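/*
 * Account the time since the last runstate transition, folding the
 * scheduler's steal time into RUNSTATE_runnable, then switch to the
 * new runstate.
 */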
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;

	if (vx->runstate_cache.active)
		kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
}

void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
	struct kvm_lapic_irq irq = { };
	int r;

	irq.dest_id = v->vcpu_id;
	irq.vector = v->arch.xen.upcall_vector;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.shorthand = APIC_DEST_NOSHORT;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.level = 1;

	/* The fast version will always work for physical unicast */
	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
}

/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);

	/* For the per-vCPU lapic vector, deliver it as MSI. */
	if (v->arch.xen.upcall_vector)
		kvm_xen_inject_vcpu_vector(v);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->arch.xen.xen_lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->arch.xen.xen_lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->arch.xen.xen_lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->arch.xen.xen_lock);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			mutex_lock(&kvm->arch.xen.xen_lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->arch.xen.xen_lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		mutex_lock(&kvm->arch.xen.xen_lock);
		kvm->arch.xen.xen_version = data->u.xen_version;
		mutex_unlock(&kvm->arch.xen.xen_lock);
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		mutex_lock(&kvm->arch.xen.xen_lock);
		kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
		mutex_unlock(&kvm->arch.xen.xen_lock);
		r = 0;
		break;

	default:
		break;
	}

	return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->arch.xen.xen_lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		data->u.xen_version = kvm->arch.xen.xen_version;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->arch.xen.xen_lock);
	return r;
}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
				     data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
		size_t sz, sz1, sz2;

		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			r = 0;
		deactivate_out:
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
			break;
		}

		/*
		 * If the guest switches to 64-bit mode after setting the runstate
		 * address, that's actually OK. kvm_xen_update_runstate_guest()
		 * will cope.
		 */
		if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
			sz = sizeof(struct vcpu_runstate_info);
		else
			sz = sizeof(struct compat_vcpu_runstate_info);

		/* How much fits in the (first) page? */
		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
				     data->u.gpa, sz1);
		if (r)
			goto deactivate_out;

		/* Either map the second page, or deactivate the second GPC */
		if (sz1 >= sz) {
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
		} else {
			sz2 = sz - sz1;
			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
					     data->u.gpa + sz1, sz2);
			if (r)
				goto deactivate_out;
		}

		kvm_xen_update_runstate_guest(vcpu, false);
		break;
	}
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		else if (vcpu->arch.xen.runstate_cache.active)
			kvm_xen_update_runstate_guest(vcpu, false);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS)
			r = -EINVAL;
		else {
			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port &&
		    data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
			r = -EINVAL;
			break;
		}

		if (!vcpu->arch.xen.timer.function)
			kvm_xen_init_timer(vcpu);

		/* Stop the timer (if it's running) before changing the vector */
		kvm_xen_stop_timer(vcpu);
		vcpu->arch.xen.timer_virq = data->u.timer.port;

		/* Start the timer if the new value has a valid vector+expiry. */
		if (data->u.timer.port && data->u.timer.expires_ns)
			kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
					    data->u.timer.expires_ns -
					    get_kvmclock_ns(vcpu->kvm));

		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			vcpu->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
	return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->arch.xen.xen_lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		/*
		 * Ensure a consistent snapshot of state is captured, with a
		 * timer either being pending, or the event channel delivered
		 * to the corresponding bit in the shared_info. Not still
		 * lurking in the timer_pending flag for deferred delivery.
		 * Purely as an optimisation, if the timer_expires field is
		 * zero, that means the timer isn't active (or even in the
		 * timer_pending flag) and there is no need to cancel it.
		 */
		if (vcpu->arch.xen.timer_expires) {
			hrtimer_cancel(&vcpu->arch.xen.timer);
			kvm_xen_inject_timer_irqs(vcpu);
		}

		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;

		/*
		 * The hrtimer may trigger and raise the IRQ immediately,
		 * while the returned state causes it to be set up and
		 * raised again on the destination system after migration.
		 * That's fine, as the guest won't even have had a chance
		 * to run and handle the interrupt. Asserting an already
		 * pending event channel is idempotent.
		 */
		if (vcpu->arch.xen.timer_expires)
			hrtimer_start_expires(&vcpu->arch.xen.timer,
					      HRTIMER_MODE_ABS_HARD);

		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = vcpu->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
	return r;
}

int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;
		int ret;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
		kfree(page);
		if (ret)
			return 1;
	}
	return 0;
}

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	/* Only some feature flags need to be *enabled* by userspace */
	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
		KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
		KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
	u32 old_flags;

	if (xhc->flags & ~permitted_flags)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->arch.xen.xen_lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	old_flags = kvm->arch.xen_hvm_config.flags;
	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->arch.xen.xen_lock);

	if ((old_flags ^ xhc->flags) & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);

	return 0;
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

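/*
 * With the 2-level event channel ABI the ports are tracked in a
 * fixed-size bitmap in shared_info, so the limit is the square of the
 * guest word size: 64 * 64 = 4096 ports for 64-bit guests and
 * 32 * 32 = 1024 ports for 32-bit (compat) guests.
 */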
static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}

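/*
 * Check whether any of the given ports is already pending in the
 * shared_info page, to decide whether SCHEDOP_poll needs to block.
 */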
static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{
	struct kvm *kvm = vcpu->kvm;
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long *pending_bits;
	unsigned long flags;
	bool ret = true;
	int idx, i;

	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	ret = false;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	}

	for (i = 0; i < nr_ports; i++) {
		if (test_bit(ports[i], pending_bits)) {
			ret = true;
			break;
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}

static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
{
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
	struct x86_exception e;
	int i;

	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;

		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);

		if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
			*r = -EFAULT;
			return true;
		}

		/*
		 * This is a 32-bit pointer to an array of evtchn_port_t which
		 * are uint32_t, so once it's converted no further compat
		 * handling is needed.
		 */
		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
		if (kvm_read_guest_virt(vcpu, param, &sched_poll,
					sizeof(sched_poll), &e)) {
			*r = -EFAULT;
			return true;
		}
	}

	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
		if (sched_poll.nr_ports > 128) {
			*r = -EINVAL;
			return true;
		}

		ports = kmalloc_array(sched_poll.nr_ports,
				      sizeof(*ports), GFP_KERNEL);
		if (!ports) {
			*r = -ENOMEM;
			return true;
		}
	} else {
		ports = &port;
	}

	if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
				sched_poll.nr_ports * sizeof(*ports), &e)) {
		*r = -EFAULT;
		goto out;
	}

	for (i = 0; i < sched_poll.nr_ports; i++) {
		if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
			*r = -EINVAL;
			goto out;
		}
	}

	if (sched_poll.nr_ports == 1)
		vcpu->arch.xen.poll_evtchn = port;
	else
		vcpu->arch.xen.poll_evtchn = -1;

	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;

		if (sched_poll.timeout)
			mod_timer(&vcpu->arch.xen.poll_timer,
				  jiffies + nsecs_to_jiffies(sched_poll.timeout));

		kvm_vcpu_halt(vcpu);

		if (sched_poll.timeout)
			del_timer(&vcpu->arch.xen.poll_timer);

		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->arch.xen.poll_evtchn = 0;
	*r = 0;
out:
	/* Really, this is only needed in case of timeout */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (unlikely(sched_poll.nr_ports > 1))
		kfree(ports);
	return true;
}

static void cancel_evtchn_poll(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}

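/*
 * Handle SCHEDOP_poll (and, via fallthrough, SCHEDOP_yield) in the
 * kernel; any other sched_op command is left for userspace to handle.
 */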
static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
{
	switch (cmd) {
	case SCHEDOP_poll:
		if (kvm_xen_schedop_poll(vcpu, longmode, param, r))
			return true;
		fallthrough;
	case SCHEDOP_yield:
		kvm_vcpu_on_spin(vcpu, true);
		*r = 0;
		return true;
	default:
		break;
	}

	return false;
}

struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));

static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;
	struct x86_exception e;
	s64 delta;

	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}

		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding after the interesting part of the structure. So
		 * for a faithful emulation of Xen we have to *try* to copy
		 * the padding and return -EFAULT if we can't. Otherwise we
		 * might as well just have copied the 12-byte 32-bit struct.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

		if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
					sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
			*r = -EFAULT;
			return true;
		}

		/* A delta <= 0 results in an immediate callback, which is what we want */
		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
		*r = 0;
		return true;

	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		kvm_xen_stop_timer(vcpu);
		*r = 0;
		return true;
	}

	return false;
}

static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{
	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	if (timeout) {
		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
		int64_t delta = timeout - guest_now;

		/* Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50ns) which would be caused by jiffies
		 * overflow. For those cases, it sets the timeout 100ms in
		 * the future (not *too* soon, since if a guest really did
		 * set a long timeout on purpose we don't want to keep
		 * churning CPU time by waking it up).
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}

		kvm_xen_start_timer(vcpu, timeout, delta);
	} else {
		kvm_xen_stop_timer(vcpu);
	}

	*r = 0;
	return true;
}

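/*
 * Entry point for Xen hypercalls. Guest arguments arrive in the
 * registers dictated by the Xen ABI: %ebx, %ecx, %edx, %esi, %edi and
 * %ebp for 32-bit guests; %rdi, %rsi, %rdx, %r10, %r8 and %r9 for
 * 64-bit guests. Anything the kernel does not (or may not) accelerate
 * is forwarded to the VMM via KVM_EXIT_XEN.
 */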
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;
	u8 cpl;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	cpl = static_call(kvm_x86_get_cpl)(vcpu);
	trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	/*
	 * Only allow hypercall acceleration for CPL0. The rare hypercalls that
	 * are permitted in guest userspace can be handled by the VMM.
	 */
	if (unlikely(cpl > 0))
		goto handle_in_userspace;

	switch (input) {
	case __HYPERVISOR_xen_version:
		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
			r = vcpu->kvm->arch.xen.xen_version;
			handled = true;
		}
		break;
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
						 params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
						params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	default:
		break;
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);

handle_in_userspace:
	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = cpl;
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}

static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{
	int poll_evtchn = vcpu->arch.xen.poll_evtchn;

	if ((poll_evtchn == port || poll_evtchn == -1) &&
	    test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	rc = -EWOULDBLOCK;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
		kvm_xen_check_poller(vcpu, xe->port);
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}

		/* For the per-vCPU lapic vector, deliver it as MSI. */
		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
			kvm_xen_inject_vcpu_vector(vcpu);
			kick_vcpu = false;
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	mutex_lock(&kvm->arch.xen.xen_lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);

	mutex_unlock(&kvm->arch.xen.xen_lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -EINVAL;

	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;

	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
}

/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};

/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;
	int ret;

	/* Protect writes to evtchnfd as well as the idr lookup. */
	mutex_lock(&kvm->arch.xen.xen_lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);

	ret = -ENOENT;
	if (!evtchnfd)
		goto out_unlock;

	/* For an UPDATE, nothing may change except the priority/vcpu */
	ret = -EINVAL;
	if (evtchnfd->type != data->u.evtchn.type)
		goto out_unlock;

	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
		goto out_unlock;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		goto out_unlock;

	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
	ret = 0;

out_unlock:
	mutex_unlock(&kvm->arch.xen.xen_lock);
	return ret;
}

/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd;
	int ret = -EINVAL;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;

	switch(data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out_noeventfd; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out_noeventfd;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;
	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL; */

		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	}

	mutex_lock(&kvm->arch.xen.xen_lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->arch.xen.xen_lock);
	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
out_noeventfd:
	kfree(evtchnfd);
	return ret;
}

static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->arch.xen.xen_lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->arch.xen.xen_lock);

	if (!evtchnfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
}

static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd, **all_evtchnfds;
	int i;
	int n = 0;

	mutex_lock(&kvm->arch.xen.xen_lock);

	/*
	 * Because synchronize_srcu() cannot be called inside the
	 * critical section, first collect all the evtchnfd objects
	 * in an array as they are removed from evtchn_ports.
	 */
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
		n++;

	all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
	if (!all_evtchnfds) {
		mutex_unlock(&kvm->arch.xen.xen_lock);
		return -ENOMEM;
	}

	n = 0;
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		all_evtchnfds[n++] = evtchnfd;
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
	}
	mutex_unlock(&kvm->arch.xen.xen_lock);

	synchronize_srcu(&kvm->srcu);

	while (n--) {
		evtchnfd = all_evtchnfds[n];
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	kfree(all_evtchnfds);

	return 0;
}

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
}

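/*
 * Accelerated EVTCHNOP_send: look up the port that userspace
 * registered via KVM_XEN_ATTR_TYPE_EVTCHN and either raise the
 * corresponding local port or signal the attached eventfd. Returning
 * false sends the whole hypercall out to userspace instead.
 */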
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	struct x86_exception e;

	/* Sanity check: this structure is the same for 32-bit and 64-bit */
	BUILD_BUG_ON(sizeof(send) != 4);
	if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
		*r = -EFAULT;
		return true;
	}

	/*
	 * evtchnfd is protected by kvm->srcu; the idr lookup instead
	 * is protected by RCU.
	 */
	rcu_read_lock();
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	rcu_read_unlock();
	if (!evtchnfd)
		return false;

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx);
	}

	*r = 0;
	return true;
}

void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);

	del_timer_sync(&vcpu->arch.xen.poll_timer);
}

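/*
 * Keep the Xen CPUID timing information (sub-leaves 1 and 2 of leaf 3
 * in the hypervisor's leaf space) in sync with the vCPU's current TSC
 * scaling, so a guest that reads it after migration sees the correct
 * frequency.
 */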
void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;
	u32 function;

	if (!vcpu->arch.xen.cpuid.base)
		return;

	function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3);
	if (function > vcpu->arch.xen.cpuid.limit)
		return;

	entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
	if (entry) {
		entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul;
		entry->edx = vcpu->arch.hv_clock.tsc_shift;
	}

	entry = kvm_find_cpuid_entry_index(vcpu, function, 2);
	if (entry)
		entry->eax = vcpu->arch.hw_tsc_khz;
}

void kvm_xen_init_vm(struct kvm *kvm)
{
	mutex_init(&kvm->arch.xen.xen_lock);
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}