// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}

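/*
 * Ask for an underflow maintenance interrupt: with GICH_HCR_UIE set,
 * the hardware interrupts us once the LRs hold at most one valid
 * entry, giving us a chance to refill them from the ap_list.
 */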
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

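/*
 * An LR signals an EOI maintenance interrupt when it is invalid
 * (neither pending nor active), has EOI notification requested, and
 * is not directly linked to a HW interrupt.
 */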
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;
		bool deactivated;

		/* Extract the source vCPU id from the LR */
		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
		cpuid &= 7;

		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		/* Only SGIs carry a source vCPU; remember it for deactivation */
		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

	if (irq->group)
		val |= GICH_LR_GROUP1;

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * anyway.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));

			if (irq->source) {
				irq->pending_latch = true;
				val |= GICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;

	/*
	 * The GICv2 LR only holds five bits of priority: e.g. an 8-bit
	 * priority of 0xa0 is stored as 0x14 (0xa0 >> 3), dropping the
	 * bottom three bits.
	 */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

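/*
 * Pack the virtual CPU interface state described by @vmcrp into the
 * GICH_VMCR register layout: each field is shifted into place and
 * masked. The five priority mask bits land at the top of the register,
 * e.g. a PMR of 0xf8 is stored as
 * (0xf8 >> GICV_PMR_PRIORITY_SHIFT) << GICH_VMCR_PRIMASK_SHIFT.
 */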
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
		GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
		GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
		GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		GICH_VMCR_EOI_MODE_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
		GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
		GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
		      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

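/* Reset the virtual interface state and switch it on for this vcpu. */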
void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

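/*
 * Wire up the guest-visible CPU interface: the guest's "GICC" is
 * really the hardware GICV frame, mapped at the address userspace
 * chose, unless we have to trap and emulate those accesses instead.
 */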
int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_debug("Need to set vgic cpu and dist addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_debug("VGIC CPU and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		return ret;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			return ret;
		}
	}

	return 0;
}

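/*
 * When this static key is enabled, guest accesses to the GICV region
 * trap to the hypervisor and are emulated, instead of being handled
 * by a direct stage-2 mapping of the hardware GICV frame.
 */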
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv2 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (is_protected_kvm_enabled()) {
		kvm_err("GICv2 not supported in protected mode\n");
		return -ENXIO;
	}

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}

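/*
 * Read back the LRs the hardware may have modified while the guest
 * ran. ELRSR flags the LRs that are now empty, so an MMIO read is
 * only needed for LRs that still hold state.
 */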
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = cpu_if->used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

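/*
 * On guest exit, copy the hardware LRs back to memory and switch the
 * virtual interface off (GICH_HCR = 0) so it stays quiet on the host.
 */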
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		save_lrs(vcpu, base);
		writel_relaxed(0, base + GICH_HCR);
	}
}

void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}

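/*
 * vcpu_load: restore the parts of the CPU interface the guest observes
 * directly, i.e. the VMCR and the active priorities register.
 */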
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}

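/*
 * The guest may have changed VMCR (e.g. its priority mask) while it
 * ran; read the hardware copy back into memory.
 */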
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
}

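/* vcpu_put: save the VMCR and active priorities back to memory. */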
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	vgic_v2_vmcr_sync(vcpu);
	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}