/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global __section(.hyp.text) kvm_vgic_global_state = {.gicv3_cpuif = STATIC_KEY_FALSE_INIT,};

/*
 * Locking order is always:
 * its->cmd_lock (mutex)
 * its->its_lock (mutex)
 * vgic_cpu->ap_list_lock
 * kvm->lpi_list_lock
 * vgic_irq->irq_lock
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
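
/*
 * Illustrative usage sketch of the lookup/refcount contract above (not part
 * of the original file): every successful vgic_get_irq() must be paired with
 * a vgic_put_irq() once the caller is done with the interrupt, otherwise LPI
 * structures are never freed.
 *
 *	struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
 *
 *	if (!irq)
 *		return -EINVAL;
 *	spin_lock(&irq->irq_lock);
 *	... inspect or modify the interrupt state ...
 *	spin_unlock(&irq->irq_lock);
 *	vgic_put_irq(kvm, irq);
 */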

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock(&dist->lpi_list_lock);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock(&dist->lpi_list_lock);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock(&dist->lpi_list_lock);

	kfree(irq);
}

/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq->pending) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irqa->pending;
	pendb = irqb->enabled && irqb->pending;

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}
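
/*
 * A small worked example of the ordering above (illustrative only, not part
 * of the original file): with IRQ A active, IRQ B pending at priority 0xa0
 * and IRQ C pending at priority 0x20 (lower value means higher priority),
 * the ap_list sorts to A, C, B, so the active interrupt always gets an LR
 * and the highest-priority pending interrupt is packed next.
 */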

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
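
/*
 * Illustrative examples of the rule above (not part of the original file):
 *
 *	level IRQ, line_level == false, level == true	-> valid (raise)
 *	level IRQ, line_level == true,  level == true	-> ignored (no change)
 *	edge IRQ,  level == true			-> valid (new edge)
 *	edge IRQ,  level == false			-> ignored
 */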

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu)
			kvm_vcpu_kick(vcpu);
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* someone can do stuff here, which we re-check below */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_vcpu_kick(vcpu);

	return true;
}

static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int intid, bool level,
				   bool mapped_irq)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	if (irq->hw != mapped_irq) {
		vgic_put_irq(kvm, irq);
		return -EINVAL;
	}

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL) {
		irq->line_level = level;
		irq->pending = level || irq->soft_pending;
	} else {
		irq->pending = true;
	}

	vgic_queue_irq_unlock(kvm, irq);
	vgic_put_irq(kvm, irq);

	return 0;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level)
{
	return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
}
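
/*
 * A minimal usage sketch (illustrative only, not part of the original file):
 * a device model raising and later lowering a level-sensitive SPI. SPI n is
 * INTID 32 + n, and cpuid only matters for private interrupts (SGIs/PPIs).
 *
 *	kvm_vgic_inject_irq(kvm, 0, 32 + spi, true);	// assert the line
 *	...
 *	kvm_vgic_inject_irq(kvm, 0, 32 + spi, false);	// deassert the line
 */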

int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			       bool level)
{
	return vgic_update_irq_pending(kvm, cpuid, intid, level, true);
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = true;
	irq->hwintid = phys_irq;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = false;
	irq->hwintid = 0;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}
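
/*
 * A minimal lifecycle sketch (illustrative only, not part of the original
 * file) for an interrupt forwarded from a physical IRQ to the guest, as the
 * architected timer does:
 *
 *	kvm_vgic_map_phys_irq(vcpu, virt_irq, phys_irq);
 *	kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id, virt_irq, true);
 *	...
 *	if (kvm_vgic_map_is_active(vcpu, virt_irq))
 *		... the guest has not deactivated it yet ...
 *	kvm_vgic_unmap_phys_irq(vcpu, virt_irq);
 */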

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_process_maintenance(vcpu);
	else
		vgic_v3_process_maintenance(vcpu);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source)
			count += hweight8(irq->source);
		else
			count++;
		spin_unlock(&irq->irq_lock);
	}

	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
		vgic_set_underflow(vcpu);
		vgic_sort_ap_list(vcpu);
	}

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		if (unlikely(vgic_target_oracle(irq) != vcpu))
			goto next;

		/*
		 * If we get an SGI with multiple sources, try to get
		 * them in all at once.
		 */
		do {
			vgic_populate_lr(vcpu, irq, count++);
		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr)
			break;
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	vgic_process_maintenance_interrupt(vcpu);
	vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
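
/*
 * Rough sketch of how the two entry points above are used by the per-VCPU
 * run loop (illustrative only, not part of this file; the real call sites
 * live in the arch-specific kvm_arch_vcpu_ioctl_run() path):
 *
 *	for (;;) {
 *		kvm_vgic_flush_hwstate(vcpu);	// emulation state -> LRs
 *		... enter the guest ...
 *		kvm_vgic_sync_hwstate(vcpu);	// LRs -> emulation state
 *	}
 */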

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq->pending && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	bool map_is_active;

	spin_lock(&irq->irq_lock);
	map_is_active = irq->hw && irq->active;
	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}