// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}
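/*
 * Illustrative note (not from the original file): the register tables in
 * vgic-mmio-v2.c/vgic-mmio-v3.c typically wire reserved or unimplemented
 * registers to vgic_mmio_read_raz and vgic_mmio_write_wi, giving the
 * architected RAZ/WI behaviour: reads return zero, writes are discarded.
 */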
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
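/*
 * Worked example (illustrative, not part of the original source): these
 * registers allocate 1 bit per IRQ, and VGIC_ADDR_TO_INTID(addr, 1)
 * evaluates to (addr * 8) / 1. A 32-bit read at byte offset 0x8 of the
 * GICD_IGROUPRn bank therefore starts at INTID 64 and, with
 * len * 8 == 32, covers INTIDs 64..95.
 */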
static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}
void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);

			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
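/*
 * Illustrative scenario (an assumption, not text from the original file):
 * a mapped level interrupt fires while disabled at the VGIC level and the
 * guest then quiesces the device. On re-enable, was_high is true but
 * vgic_get_phys_line_level() now reads false, so the stale physical
 * active state is dropped and the GIC can signal the next assertion.
 */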
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}
int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}
static unsigned long __read_pending(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		/*
		 * When used from userspace with a GICv3 model:
		 *
		 * Pending state of interrupt is latched in pending_latch
		 * variable. Userspace will save and restore pending state
		 * and line_level separately.
		 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
		 * for handling of ISPENDR and ICPENDR.
		 */
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else if (!is_user && vgic_irq_is_mapped_level(irq)) {
			val = vgic_get_phys_line_level(irq);
		} else {
			switch (vcpu->kvm->arch.vgic.vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				if (is_user) {
					val = irq->pending_latch;
					break;
				}
				fallthrough;
			default:
				val = irq_is_pending(irq);
				break;
			}
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, false);
}

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, true);
}
static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}
static void __set_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
			  unsigned long val, bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI when written from the guest. */
		if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * GICv2 SGIs are terribly broken. We can't restore
		 * the source of the interrupt, so just pick the vcpu
		 * itself as the source...
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;

			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw && !is_user)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
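/*
 * Example (illustrative): a GICv2 guest writing BIT(3) to GICD_ISPENDR0
 * is ignored here, as guests make SGIs pending via GICD_SGIR instead.
 * The same write coming from userspace latches SGI3 pending and records
 * the accessed vcpu in irq->source as the best-effort source CPU.
 */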
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	__set_pending(vcpu, addr, len, val, false);
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	__set_pending(vcpu, addr, len, val, true);
	return 0;
}
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}
static void __clear_pending(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val, bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI when written from the guest. */
		if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * More fun with GICv2 SGIs! If we're clearing one of them
		 * from userspace, which source vcpu to clear? Let's not
		 * even think of it, and blow the whole set.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;

			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw && !is_user)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	__clear_pending(vcpu, addr, len, val, false);
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	__clear_pending(vcpu, addr, len, val, true);
	return 0;
}
/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts accessed from the
 * non-owning CPU, we have to stop all the VCPUs because interrupts can be
 * migrated while we don't hold the IRQ locks and we don't want to be chasing
 * moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
	     vcpu != kvm_get_running_vcpu()) ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}
/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
	     vcpu != kvm_get_running_vcpu()) ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}
static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return val;
}
unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do about it.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}
static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
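/*
 * Worked example (assuming VGIC_PRI_BITS == 5, as defined in vgic.h):
 * GENMASK(7, 8 - VGIC_PRI_BITS) == GENMASK(7, 3) == 0xf8, so a guest
 * write of 0xab is stored as 0xa8; the bottom three priority bits read
 * back as zero, matching a GIC with 32 implemented priority levels.
 */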
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
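/*
 * Encoding example (illustrative): GICD_ICFGRn uses 2 bits per IRQ and
 * only bit (2 * i + 1) is meaningful here, so writing 0x2 to a field
 * selects edge and 0x0 selects level; the read side above reports an
 * edge IRQ as (2U << (i * 2)) and a level IRQ as 0.
 */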
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u32 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u32 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}
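/*
 * Note (an assumption that follows from bsearch() semantics): the region
 * table must be sorted by ascending reg_offset for this lookup to work.
 * match_region() steers the binary search by returning -1 for offsets
 * below a region, 1 for offsets at or past its end, and 0 on a hit.
 */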
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}
/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}
/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}
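/*
 * Worked example (illustrative): for a 4-byte value 0x01020304 the guest
 * expects the bytes 04 03 02 01 at increasing addresses. On a
 * little-endian host cpu_to_le32() is a no-op; on a big-endian host it
 * byte-swaps so that kvm_mmio_write_buf() still emits that LE layout.
 */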
static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}
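/*
 * Examples (illustrative): a 2-byte access falls through to the default
 * case and is rejected; a byte access to a region advertising only
 * VGIC_ACCESS_32bit fails the flags check; and with nr_spis == 32
 * (96 IRQs total), a 1-bit-per-IRQ access at byte offset 12 resolves to
 * INTID 96 and is rejected as non-allocated.
 */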
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			     gpa_t addr, u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}
static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			      gpa_t addr, const u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return -EINVAL;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}
/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, dev, offset, val);
}
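/*
 * Usage sketch (hedged; see Documentation/virt/kvm/devices/arm-vgic-v3.rst):
 * userspace reaches this path via KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR
 * on the VGIC device, e.g. with group KVM_DEV_ARM_VGIC_GRP_DIST_REGS and
 * the register offset encoded in the attribute, which ends up here as a
 * 32-bit access at that offset.
 */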
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}
static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}
const struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				       len, &io_device->dev);
}
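/*
 * Usage note (an assumption based on the surrounding VGIC code): this is
 * called once the distributor base address is known, and registers a
 * single KVM_MMIO_BUS device spanning the whole distributor frame;
 * individual registers are then demultiplexed by vgic_get_mmio_region()
 * in the dispatch handlers above.
 */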