/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *  Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *  Based on Xen 3.1 code.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"
#if 0
#define ioapic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
			  bool line_status);
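
/*
 * The guest programs the IOAPIC through two memory-mapped registers:
 * IOREGSEL (IOAPIC_REG_SELECT) holds the index of the register to access,
 * and IOWIN (IOAPIC_REG_WINDOW) reads or writes the selected register.
 * ioapic_read_indirect() below decodes ioregsel: the version, ID and
 * arbitration registers are synthesized on the fly, and indices 0x10 and
 * up map to the 64-bit redirection table entries, accessed as two 32-bit
 * halves.
 */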
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;
	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;
	default:
	    {
		u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
		u64 redir_content = ~0ULL;

		if (redir_index < IOAPIC_NUM_PINS) {
			u32 index = array_index_nospec(
				redir_index, IOAPIC_NUM_PINS);

			redir_content = ioapic->redirtbl[index].bits;
		}

		result = (ioapic->ioregsel & 0x1) ?
		    (redir_content >> 32) & 0xffffffff :
		    redir_content & 0xffffffff;
		break;
	    }
	}

	return result;
}
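
/*
 * RTC EOI tracking: rtc_status remembers, per vCPU, whether the RTC vector
 * has been delivered but not yet EOI'd (dest_map) and how many EOIs are
 * still outstanding (pending_eoi).  This lets ioapic_set_irq() report an
 * RTC interrupt as coalesced to userspace, which would otherwise cause
 * time drift in guests that reprogram the RTC on every tick.
 */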
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
				 e->fields.dest_mode))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}
static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(vcpu->vcpu_id,
			       ioapic->rtc_status.dest_map.map)) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}
static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}
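
/*
 * Core pin update for a single IOAPIC input.  A return value of 0 means
 * the interrupt was coalesced (a previous assertion is still pending
 * delivery or EOI); de-assertion returns 1, and delivery returns whatever
 * ioapic_service() reports.  The outcome is recorded by the
 * kvm_ioapic_set_irq tracepoint.
 */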
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
		int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge)
		ioapic->irr_delivered &= ~mask;
	if ((edge && old_irr == ioapic->irr) ||
	    (!edge && entry.fields.remote_irr)) {
		ret = 0;
		goto out;
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}
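
/*
 * Build the bitmap of vectors for which this vCPU must trap EOIs back
 * into the kernel: level-triggered entries, entries with an ack notifier,
 * and the RTC pin all need the EOI so that remote_irr and the RTC
 * tracking state can be updated.  Runs when the scan-ioapic request is
 * processed on the vCPU.
 */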
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			if (kvm_apic_match_dest(vcpu, NULL, 0,
				     e->fields.dest_id, e->fields.dest_mode) ||
			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
				__set_bit(e->fields.vector,
					  ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}
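
/*
 * Handle a write through the IOWIN register.  Redirection-table writes
 * preserve the read-only Remote IRR and Delivery Status bits, fire the
 * mask notifiers when the mask bit changes, re-deliver a level-triggered
 * interrupt that is still set in irr, and kick all vCPUs to rescan their
 * EOI-exit bitmaps.
 */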
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	int old_remote_irr, old_delivery_status;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		index = array_index_nospec(index, IOAPIC_NUM_PINS);
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

		/*
		 * Some OSes (Linux, Xen) assume that Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered. This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		kvm_make_scan_ioapic_request(ioapic->kvm);
		break;
	}
}
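
/*
 * Deliver one redirection-table entry to the local APICs.  Masked entries
 * are not delivered (return -1).  For level-triggered entries that reach
 * at least one CPU, Remote IRR is set until the guest EOIs the vector;
 * for the RTC pin, the set of target vCPUs is recorded in rtc_status so
 * that the matching EOIs can be counted.
 */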
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask)
		return -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that it is only called if it is >= zero, namely
		 * if rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}
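
/*
 * External entry point for asserting or de-asserting an IOAPIC pin, used
 * by the irq routing code (e.g. for irqfds) and by the KVM_IRQ_LINE and
 * KVM_IRQ_LINE_STATUS ioctls.  A minimal userspace sketch (hypothetical
 * vm_fd and gsi, error handling omitted):
 *
 *	struct kvm_irq_level irq = { .irq = gsi, .level = 1 };
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);	// assert the pin
 *	irq.level = 0;
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);	// de-assert (edge pulse)
 *
 * Each irq_source_id keeps its own line state; the pin level is the
 * logical OR of all sources (see __kvm_irq_line_state).
 */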
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}
void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}
#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
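
/*
 * EOI broadcast handling.  When a vCPU EOIs a vector that matches a
 * level-triggered entry, Remote IRR is cleared and, if the line is still
 * asserted, the interrupt is re-delivered.  If the same pin has been
 * re-delivered IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT times in a row, delivery
 * is deferred to delayed work so that a guest which never de-asserts the
 * line cannot livelock the vCPU.
 */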
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    vector == dest_map->vectors[vcpu->vcpu_id])
		rtc_irq_eoi(ioapic, vcpu);

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		/*
		 * We are dropping the lock while calling ack notifiers
		 * because ack notifier callbacks for assigned devices call
		 * into the IOAPIC recursively.  Since remote_irr is cleared
		 * only after the call to the notifiers, if the same vector
		 * is delivered while the lock is dropped it will be put into
		 * irr and delivered after the ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
		    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
			++ioapic->irq_eoi[i];
			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
				/*
				 * Real hardware does not deliver the interrupt
				 * immediately during eoi broadcast, and this
				 * lets a buggy guest make slow progress
				 * even if it does not correctly handle a
				 * level-triggered interrupt.  Emulate this
				 * behavior if we detect an interrupt storm.
				 */
				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
				ioapic->irq_eoi[i] = 0;
				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
			} else {
				ioapic_service(ioapic, i, false);
			}
		} else {
			ioapic->irq_eoi[i] = 0;
		}
	}
}
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}
static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}
static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}
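
/*
 * MMIO accessors registered on KVM_MMIO_BUS for the IOAPIC_MEM_LENGTH
 * window at ioapic->base_address.  Accesses outside the window are
 * rejected with -EOPNOTSUPP so that other devices on the bus can claim
 * them.
 */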
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;
	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;
	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}
static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void*)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8  *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;
	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;
	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}
static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}
static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}
void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}
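
/*
 * Save/restore of the IOAPIC register state, typically reached via the
 * KVM_GET_IRQCHIP/KVM_SET_IRQCHIP ioctls.  On save, edge-triggered
 * interrupts that were already delivered are masked out of irr
 * (irr_delivered) so that restoring the state does not inject them a
 * second time; on restore, the remaining irr bits are re-injected through
 * kvm_ioapic_inject_all().
 */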
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}
void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}