// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv3 MMIO handling functions
 */

#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at byte offset @offset from @data */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
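
/*
 * Example (illustrative only): byte 2 of 0x11223344 is 0x22, and the two
 * bytes starting at offset 1 are 0x2233:
 *
 *	extract_bytes(0x11223344ULL, 2, 1) == 0x22
 *	extract_bytes(0x11223344ULL, 1, 2) == 0x2233
 */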

/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}
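
/*
 * Example (illustrative only): a 4-byte write of 0xcafebabe at offset 4
 * replaces just the upper word:
 *
 *	update_64bit_reg(0x1111111122222222ULL, 4, 4, 0xcafebabe)
 *		== 0xcafebabe22222222ULL
 */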

bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}

bool vgic_supports_direct_msis(struct kvm *kvm)
{
	return (kvm_vgic_global_state.has_gicv4_1 ||
		(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
}

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vgic->enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		if (vgic->nassgireq)
			value |= GICD_CTLR_nASSGIreq;
		break;
	case GICD_TYPER:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
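		/*
		 * ITLinesNumber encodes the number of supported interrupt
		 * IDs in blocks of 32, minus one: e.g. nr_spis == 480 gives
		 * 512 IDs, encoded above as (512 >> 5) - 1 == 15.
		 */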
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_TYPER2:
		if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
			value = GICD_TYPER2_nASSGIcap;
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_CTLR: {
		bool was_enabled, is_hwsgi;

		mutex_lock(&vcpu->kvm->arch.config_lock);

		was_enabled = dist->enabled;
		is_hwsgi = dist->nassgireq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
			val &= ~GICD_CTLR_nASSGIreq;

		/* Dist stays enabled? nASSGIreq is RO */
		if (was_enabled && dist->enabled) {
			val &= ~GICD_CTLR_nASSGIreq;
			val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
		}

		/* Switching HW SGIs? */
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		if (is_hwsgi != dist->nassgireq)
			vgic_v4_configure_vsgis(vcpu->kvm);

		if (kvm_vgic_global_state.has_gicv4_1 &&
		    was_enabled != dist->enabled)
			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
		else if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		break;
	}
	case GICD_TYPER:
	case GICD_TYPER2:
	case GICD_IIDR:
		/* This is at best for documentation purposes... */
		return;
	}
}

static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u32 reg;

	switch (addr & 0x0c) {
	case GICD_TYPER2:
		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
			return -EINVAL;
		return 0;
	case GICD_IIDR:
		reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
		if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
			return -EINVAL;

		reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val);
		switch (reg) {
		case KVM_VGIC_IMP_REV_2:
		case KVM_VGIC_IMP_REV_3:
			dist->implementation_rev = reg;
			return 0;
		default:
			return -EINVAL;
		}
	case GICD_CTLR:
		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1)
			val &= ~GICD_CTLR_nASSGIreq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		return 0;
	}

	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
	return 0;
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}

bool vgic_lpis_enabled(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return atomic_read(&vgic_cpu->ctlr) == GICR_CTLR_ENABLE_LPIS;
}

static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	unsigned long val;

	val = atomic_read(&vgic_cpu->ctlr);
	if (vgic_get_implementation_rev(vcpu) >= KVM_VGIC_IMP_REV_3)
		val |= GICR_CTLR_IR | GICR_CTLR_CES;

	return val;
}

static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u32 ctlr;

	if (!vgic_has_its(vcpu->kvm))
		return;

	if (!(val & GICR_CTLR_ENABLE_LPIS)) {
		/*
		 * Don't disable if RWP is set, as there's already an
		 * ongoing disable. Funky guest...
		 */
		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
					      GICR_CTLR_ENABLE_LPIS,
					      GICR_CTLR_RWP);
		if (ctlr != GICR_CTLR_ENABLE_LPIS)
			return;

		vgic_flush_pending_lpis(vcpu);
		vgic_its_invalidate_cache(vcpu->kvm);
		atomic_set_release(&vgic_cpu->ctlr, 0);
	} else {
		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
					      GICR_CTLR_ENABLE_LPIS);
		if (ctlr != 0)
			return;

		vgic_enable_lpis(vcpu);
	}
}

static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;

	if (!rdreg)
		return false;

	if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
		return false;
	} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
		struct list_head *rd_regions = &vgic->rd_regions;
		gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;

		/*
		 * This rdist is the last one of its redist region:
		 * check whether another contiguous region starts right
		 * after it and already has rdists allocated in it.
		 */
		list_for_each_entry(iter, rd_regions, list) {
			if (iter->base == end && iter->free_index > 0)
				return false;
		}
	}

	return true;
}

static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);

	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	if (vgic_mmio_vcpu_rdist_is_last(vcpu))
		value |= GICR_TYPER_LAST;

	return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}

static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len,
					 unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * pending_latch is set irrespective of irq type
		 * (level or edge) so that the VM does not have to
		 * restore the irq config before the pending info.
		 */
		irq->pending_latch = test_bit(i, &val);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			irq_set_irqchip_state(irq->host_irq,
					      IRQCHIP_STATE_PENDING,
					      irq->pending_latch);
			irq->pending_latch = false;
		}

		if (irq->pending_latch)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_SameAsInner;
	}
}

u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}
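
/*
 * Example (illustrative): GICR_PROPBASER keeps its shareability attribute
 * in bits [11:10]. Passing vgic_sanitise_shareability as @sanitise_fn
 * rewrites an OuterShareable value in that field to InnerShareable and
 * leaves every other bit of the register untouched.
 */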

#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))

static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;

	return reg;
}

static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;

	return reg;
}

static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}

static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_lpis_enabled(vcpu))
		return;
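
	/*
	 * Lock-free update: retry the read-modify-write until no other
	 * writer raced with us, so concurrent writes to either 32-bit
	 * half cannot tear the 64-bit register.
	 */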
	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}

static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 value = vgic_cpu->pendbaser;

	value &= ~GICR_PENDBASER_PTZ;

	return extract_bytes(value, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_lpis_enabled(vcpu))
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}

static unsigned long vgic_mmio_read_sync(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len)
{
	return !!atomic_read(&vcpu->arch.vgic_cpu.syncr_busy);
}
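
/*
 * Pairs with the atomic_read() in vgic_mmio_read_sync() above: the
 * barriers order the invalidation work against the busy count, so a
 * guest polling GICR_SYNCR observes it as busy until the invalidation
 * has completed.
 */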
static void vgic_set_rdist_busy(struct kvm_vcpu *vcpu, bool busy)
{
	if (busy) {
		atomic_inc(&vcpu->arch.vgic_cpu.syncr_busy);
		smp_mb__after_atomic();
	} else {
		smp_mb__before_atomic();
		atomic_dec(&vcpu->arch.vgic_cpu.syncr_busy);
	}
}

static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_irq *irq;

	/*
	 * If the guest wrote only to the upper 32bit part of the
	 * register, drop the write on the floor, as it is only for
	 * vPEs (which we don't support for obvious reasons).
	 *
	 * Also discard the access if LPIs are not enabled.
	 */
	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
		return;

	vgic_set_rdist_busy(vcpu, true);

	irq = vgic_get_irq(vcpu->kvm, NULL, lower_32_bits(val));
	if (irq) {
		vgic_its_inv_lpi(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_set_rdist_busy(vcpu, false);
}

static void vgic_mmio_write_invall(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	/* See vgic_mmio_write_invlpi() for the early return rationale */
	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
		return;

	vgic_set_rdist_busy(vcpu, true);
	vgic_its_invall(vcpu);
	vgic_set_rdist_busy(vcpu, false);
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Attempts to set private IRQs in this block are ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}
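
/*
 * Example (illustrative): for GICD_IGROUPR with one bit per interrupt the
 * macro above expands to two regions: a 4-byte RAZ/WI region covering the
 * 32 private interrupts, and a 124-byte region at GICD_IGROUPR + 4 backed
 * by the real accessors for the SPIs.
 */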

static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
		NULL, vgic_mmio_uaccess_write_v3_misc,
		16, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rd_registers[] = {
	/* RD_base registers */
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
		NULL, vgic_mmio_uaccess_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_INVLPIR,
		vgic_mmio_read_raz, vgic_mmio_write_invlpi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_INVALLR,
		vgic_mmio_read_raz, vgic_mmio_write_invall, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_SYNCR,
		vgic_mmio_read_sync, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
	/* SGI_base registers */
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
		vgic_mmio_read_group, vgic_mmio_write_group, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}

/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base;
	int ret = 0;

	lockdep_assert_held(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		goto out_unlock;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set.  Just return
	 * without doing any work for now.
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		goto out_unlock;

	if (!vgic_v3_check_base(kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vgic_cpu->rdreg = rdreg;
	vgic_cpu->rdreg_index = rdreg->free_index;

	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rd_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_unlock(&kvm->arch.config_lock);

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      2 * SZ_64K, &rd_dev->dev);
	if (ret)
		return ret;

	/* Protected by slots_lock */
	rdreg->free_index++;
	return 0;

out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
}

static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long c;
	int ret = 0;

	lockdep_assert_held(&kvm->slots_lock);

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so iterate over the previous ones. */
		int i;

		for (i = 0; i < c; i++) {
			vcpu = kvm_get_vcpu(kvm, i);
			vgic_unregister_redist_iodev(vcpu);
		}
	}

	return ret;
}

/**
 * vgic_v3_alloc_redist_region - Allocate a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (i.e. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
				       gpa_t base, uint32_t count)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;
	struct list_head *rd_regions = &d->rd_regions;
	int nr_vcpus = atomic_read(&kvm->online_vcpus);
	size_t size = count ? count * KVM_VGIC_V3_REDIST_SIZE
			    : nr_vcpus * KVM_VGIC_V3_REDIST_SIZE;
	int ret;

	/* cross the end of memory? */
	if (base + size < base)
		return -EINVAL;

	if (list_empty(rd_regions)) {
		if (index != 0)
			return -EINVAL;
	} else {
		rdreg = list_last_entry(rd_regions,
					struct vgic_redist_region, list);

		/* Don't mix single region and discrete redist regions */
		if (!count && rdreg->count)
			return -EINVAL;

		if (count && !rdreg->count)
			return -EINVAL;

		if (index != rdreg->index + 1)
			return -EINVAL;
	}

	/*
	 * For legacy single-region redistributor regions (!count),
	 * check that the redistributor region does not overlap with the
	 * distributor's address space.
	 */
	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
		vgic_dist_overlap(kvm, base, size))
		return -EINVAL;

	/* collision with any other rdist region? */
	if (vgic_v3_rdist_overlap(kvm, base, size))
		return -EINVAL;

	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL_ACCOUNT);
	if (!rdreg)
		return -ENOMEM;

	rdreg->base = VGIC_ADDR_UNDEF;

	ret = vgic_check_iorange(kvm, rdreg->base, base, SZ_64K, size);
	if (ret)
		goto free;

	rdreg->base = base;
	rdreg->count = count;
	rdreg->free_index = 0;
	rdreg->index = index;

	list_add_tail(&rdreg->list, rd_regions);

	return 0;
free:
	kfree(rdreg);
	return ret;
}

void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
{
	list_del(&rdreg->list);
	kfree(rdreg);
}

int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	mutex_lock(&kvm->arch.config_lock);
	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
	mutex_unlock(&kvm->arch.config_lock);
	if (ret)
		return ret;

	/*
	 * Register iodevs for each existing VCPU.  Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret) {
		struct vgic_redist_region *rdreg;

		mutex_lock(&kvm->arch.config_lock);
		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		vgic_v3_free_redist_region(rdreg);
		mutex_unlock(&kvm->arch.config_lock);
		return ret;
	}

	return 0;
}

int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		iodev.regions = vgic_v3_rd_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		return vgic_v3_has_cpu_sysregs_attr(vcpu, attr);
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
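
/*
 * Example (illustrative): for level 1 the macro above isolates
 * ICC_SGI1R_EL1 bits [23:16] (Aff1) and shifts them down into the MPIDR
 * Aff1 position, bits [15:8].
 */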

static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi);
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * An access targeting Group0 SGIs can only generate
	 * those, while an access targeting Group1 SGIs can
	 * generate interrupts of either group.
	 */
	if (!irq->group || allow_group1) {
		if (!irq->hw) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			/* HW SGI? Ask the GIC to inject it */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

	vgic_put_irq(vcpu->kvm, irq);
}

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting an SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1), CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 *
 * If the interrupt routing mode bit is not set, we iterate over the Aff0
 * bits and signal the VCPUs matching the provided Aff{3,2,1}.
 *
 * If this bit is set, we signal all VCPUs except the calling one.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	unsigned long target_cpus;
	u64 mpidr;
	u32 sgi, aff0;
	unsigned long c;

	sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);

	/* Broadcast */
	if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
		kvm_for_each_vcpu(c, c_vcpu, kvm) {
			/* Don't signal the calling VCPU */
			if (c_vcpu == vcpu)
				continue;

			vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
		}

		return;
	}

	/* We iterate over affinities to find the corresponding vcpus */
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
	target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);

	for_each_set_bit(aff0, &target_cpus, hweight_long(ICC_SGI1R_TARGET_LIST_MASK)) {
		c_vcpu = kvm_mpidr_to_vcpu(kvm, mpidr | aff0);
		if (c_vcpu)
			vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
	}
}

int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rd_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
	};

	return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}

int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u32 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}