/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}
early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
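/*
 * Per-vCPU shared-memory areas that the guest registers with the host
 * via MSR writes: apf_reason receives the async page fault reason,
 * steal_time receives stolen-time accounting.  The 64-byte alignment
 * keeps each record inside a single cache line, as the PV ABI expects.
 */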
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
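/*
 * Tasks blocked on a not-yet-resident page are parked in the small hash
 * table above, keyed by the fault token the host passed in, so that the
 * later PAGE_READY notification can find the right waiter quickly.
 */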
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}
	return NULL;
}
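/*
 * kvm_async_pf_task_wait() - block the current task until the host
 * signals that the page identified by @token is resident again.  In
 * contexts where scheduling is unsafe (idle task, preemption disabled)
 * the vCPU halts instead and is woken by the reschedule IPI sent from
 * apf_task_wake_one().
 */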
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);
		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1 ||
		   rcu_preempt_depth();
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}
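/*
 * Wake every waiter parked by this CPU; used when the host sends the
 * broadcast token ~0 and when a CPU is taken offline.
 */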
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}
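/*
 * kvm_async_pf_task_wake() - handle a PAGE_READY notification for @token.
 * If the wake up arrives before the corresponding wait, a dummy node is
 * queued so that the later kvm_async_pf_task_wait() returns immediately.
 */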
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
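/*
 * Paravirtual #PF handler, installed in place of the native page fault
 * entry by kvm_apf_trap_init().  A reason of zero means an ordinary page
 * fault; otherwise CR2 holds the async PF token rather than a faulting
 * address.
 */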
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}
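/*
 * Paravirtual EOI: the host sets KVM_PV_EOI_BIT in this per-cpu word
 * when an EOI can be skipped.  Clearing the bit here acknowledges the
 * interrupt without a trapping APIC_EOI register write.
 */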
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}
static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
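/*
 * The host publishes steal time with an even/odd version protocol, much
 * like a seqcount: an odd version, or one that changes under the reader,
 * means the snapshot is inconsistent and must be retried.
 */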
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}
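/*
 * Main guest-side setup, run early in boot: install paravirt hooks for
 * every optional feature the host advertises through KVM_CPUID_FEATURES.
 */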
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}
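/*
 * Hypervisors advertise themselves through CPUID leaves starting at
 * 0x40000000; KVM is identified by the "KVMKVMKVM" signature returned in
 * ebx/ecx/edx of its base leaf.
 */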
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}
static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}
bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);
unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}
const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);
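/*
 * Enable the scheduler's steal-time accounting jump labels;
 * paravirt_steal_rq_enabled additionally folds steal time into the
 * runqueue clock unless "no-steal-acc" was given on the command line.
 */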
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>
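/*
 * kvm_wait() - the "wait" half of the pv queued-spinlock protocol: halt
 * the vCPU until the lock byte at @ptr stops holding @val, or until
 * kvm_kick_cpu() on another CPU wakes us via KVM_HC_KICK_CPU.
 */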
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}
/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
}
static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);
#endif	/* CONFIG_PARAVIRT_SPINLOCKS */