/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock
 *         kvm->lpi_list_lock
 *           vgic_irq->irq_lock
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */

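/*
 * As an illustration of the "drop the lower lock, take the upper one,
 * then re-take the lower one" rule above, see vgic_queue_irq_unlock()
 * below: it drops the irq_lock, takes the ap_list_lock, re-takes the
 * irq_lock and then re-validates everything it read under the old
 * critical section.
 */
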
/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

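/*
 * Drop a reference to an IRQ. Only LPIs are actually refcounted: if this
 * was the last reference, remove the LPI from the per-VM list (under the
 * lpi_list_lock) and free it.
 */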
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock(&dist->lpi_list_lock);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock(&dist->lpi_list_lock);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock(&dist->lpi_list_lock);

	kfree(irq);
}

/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* someone can do stuff here, which we re-check below */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq);
	vgic_put_irq(kvm, irq);

	return 0;
}

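/*
 * Mark a virtual interrupt as hardware-mapped and record the physical
 * INTID it is tied to, so that its state can be linked to the physical
 * interrupt.
 */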
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);
	irq->hw = true;
	irq->hwintid = phys_irq;
	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

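/*
 * Undo a hardware mapping: clear the HW flag and forget the physical
 * INTID recorded for this virtual interrupt.
 */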
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);
	irq->hw = false;
	irq->hwintid = 0;
	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:  Pointer to the VCPU (used for PPIs)
 * @intid: The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner: Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}

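/*
 * Fold the LR state maintained by the hardware back into our struct
 * vgic_irq bookkeeping, dispatching to the GICv2 or GICv3 backend.
 */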
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

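/*
 * Ask for an underflow maintenance interrupt, so that we get a chance to
 * refill the LRs from the ap_list once they are (almost) empty.
 */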
static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

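/*
 * Ask for a maintenance interrupt once no LR holds a pending interrupt,
 * so that remaining pending state (e.g. further SGI sources) can be
 * injected afterwards.
 */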
static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_npie(vcpu);
	else
		vgic_v3_set_npie(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
			int w = hweight8(irq->source);

			count += w;
			*multi_sgi |= (w > 1);
		} else {
			count++;
		}
		spin_unlock(&irq->irq_lock);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool npie = false;
	bool multi_sgi;
	u8 prio = 0xff;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source) {
				npie = true;
				prio = irq->priority;
			}
		}

		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	if (npie)
		vgic_set_npie(vcpu);

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing additional synchronization mechanism doesn't change
	 * this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}

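/* Restore the VCPU's GIC CPU interface state when it is scheduled in. */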
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

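/* Save the VCPU's GIC CPU interface state when it is scheduled out. */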
void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

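/*
 * Sync the VMCR back from the hardware into the shadow copy, so that
 * software evaluation of the CPU interface state (e.g. priority masking)
 * is up to date while the VCPU remains loaded.
 */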
void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_vmcr_sync(vcpu);
	else
		vgic_v3_vmcr_sync(vcpu);
}

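/*
 * Check whether this VCPU has at least one enabled and pending interrupt
 * on its ap_list.
 */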
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

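/* Check whether a hardware-mapped virtual interrupt is currently active. */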
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	bool map_is_active;

	spin_lock(&irq->irq_lock);
	map_is_active = irq->hw && irq->active;
	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}