GNU Linux-libre 4.9.333-gnu1
arch/x86/kernel/kvm.c
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

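/*
 * Tasks blocked on an async page fault are tracked in a small hash table,
 * keyed by the token that identifies the outstanding fault, so that the
 * later "page ready" notification can find and wake the right waiter.
 */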
struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

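/*
 * Called when the host signals that the faulting page is not present
 * (KVM_PV_REASON_PAGE_NOT_PRESENT).  Sleep until kvm_async_pf_task_wake()
 * delivers the matching "page ready" token.  If the wake-up already arrived,
 * the dummy node left behind by the waker is consumed and we return
 * immediately.  Contexts that cannot schedule (the idle task, an elevated
 * preempt count, or an RCU read-side critical section) halt the vCPU instead.
 */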
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1 ||
                   rcu_preempt_depth();
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                rcu_irq_exit();

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }

                rcu_irq_enter();
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

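/*
 * Wake a single waiter: a halted vCPU is kicked with a reschedule IPI,
 * while a sleeping task is woken through its swait queue.
 */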
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swait_active(&n->wq))
                swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

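/*
 * Called when the host signals that a previously missing page is ready
 * (KVM_PV_REASON_PAGE_READY).  A token of ~0 wakes every waiter on this CPU.
 * If the fault side has not registered a wait node yet, a dummy node is
 * added so that kvm_async_pf_task_wait() returns immediately when it runs.
 */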
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * The async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another CPU
                         * handles the async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

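/*
 * Page fault entry point used when async page faults are enabled (see
 * kvm_apf_trap_init()).  The reason code in the per-CPU apf_reason area
 * tells us whether this is an ordinary #PF or an async PF notification;
 * for async notifications the token is delivered in CR2.
 */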
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                trace_do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);

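/*
 * Early paravirt setup: make the guest's io_delay a no-op when the host
 * advertises KVM_FEATURE_NOP_IO_DELAY, and skip the IO-APIC timer check.
 */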
static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /**
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}

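/*
 * Per-CPU guest setup: enable async page faults, PV EOI and steal time
 * accounting for this CPU by writing the corresponding KVM MSRs with the
 * physical addresses of the shared per-CPU data.
 */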
static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
         * memory.  The new kernel can re-enable it when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

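/*
 * Read the steal time published by the host for @cpu.  The version field
 * works like a seqcount: it is odd while the host is updating the record,
 * so retry until the same even version is seen before and after the read.
 */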
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, async_page_fault);
}

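/*
 * System-wide guest initialization: register the reboot notifier, hook the
 * async PF trap handler, steal clock and PV EOI according to the feature
 * bits advertised by the host, and set up CPU hotplug callbacks so each CPU
 * enables its per-CPU features as it comes online.
 */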
void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

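/*
 * KVM identifies itself through the hypervisor CPUID leaves: the base leaf
 * carries the "KVMKVMKVM" signature.  The result is cached, and a base of 0
 * doubles as the "not running on KVM" indicator used by kvm_para_available().
 */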
static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

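/*
 * Used as pv_lock_ops.wait by the paravirt queued spinlock slowpath: halt
 * this vCPU until kvm_kick_cpu() wakes it, after re-checking the lock byte
 * at @ptr with interrupts disabled so that a concurrent kick is not missed.
 */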
static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * Halt until it is our turn and we are kicked. Note that we do a safe
         * halt in the irq-enabled case to avoid hanging if the lock info is
         * overwritten in the irq spinlock slowpath and no spurious interrupt
         * arrives to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does the host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
}

static __init int kvm_spinlock_init_jump(void)
{
        if (!kvm_para_available())
                return 0;
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return 0;

        static_key_slow_inc(&paravirt_ticketlocks_enabled);
        printk(KERN_INFO "KVM setup paravirtual spinlock\n");

        return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */