// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(SYS_SCTLR_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(SYS_CPACR_EL12);
	case TTBR0_EL1:		return read_sysreg_s(SYS_TTBR0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(SYS_TTBR1_EL12);
	case TCR_EL1:		return read_sysreg_s(SYS_TCR_EL12);
	case ESR_EL1:		return read_sysreg_s(SYS_ESR_EL12);
	case AFSR0_EL1:		return read_sysreg_s(SYS_AFSR0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(SYS_AFSR1_EL12);
	case FAR_EL1:		return read_sysreg_s(SYS_FAR_EL12);
	case MAIR_EL1:		return read_sysreg_s(SYS_MAIR_EL12);
	case VBAR_EL1:		return read_sysreg_s(SYS_VBAR_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(SYS_CONTEXTIDR_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(SYS_AMAIR_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(SYS_CNTKCTL_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be
	 * set once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	return;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	return;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	return;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	return;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
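/*
 * For illustration only (not part of the trap handling): assuming the
 * pre-ARMv8.3 (no CCIDX) CCSIDR_EL1 layout, the value returned by
 * get_ccsidr() unpacks as follows:
 *
 *	u32 ccsidr = get_ccsidr(0);			// L1 data cache
 *	u32 line_size = 1U << ((ccsidr & 0x7) + 4);	// bytes per line
 *	u32 ways = ((ccsidr >> 3) & 0x3ff) + 1;		// associativity
 *	u32 sets = ((ccsidr >> 13) & 0x7fff) + 1;	// number of sets
 */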
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}

	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
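/*
 * Worked example (illustrative only): if a debug register holds
 * 0xffff0000aaaa5555 and an AArch32 guest writes 0x1234 through its
 * 32bit view, reg_to_dbg() replaces only the bottom half:
 *
 *	u64 reg = 0xffff0000aaaa5555UL;
 *	struct sys_reg_params p = { .regval = 0x1234, .is_32bit = true,
 *				    .is_write = true };
 *	reg_to_dbg(vcpu, &p, &reg);	// reg == 0xffff000000001234
 */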
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
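/*
 * For example, vcpu_id 37 (0x25) yields Aff0 = 5 and Aff1 = 2, so the
 * guest reads MPIDR_EL1 == 0x80000205 (bit 31 is RES1), assuming the
 * usual 8-bit-per-level affinity packing:
 *
 *	(37 & 0x0f) << MPIDR_LEVEL_SHIFT(0)		// 0x005
 *	((37 >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1)	// 0x200
 */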
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
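/*
 * For example, if PMCR_EL0.N reads as 4, event counter indices 0..3
 * are valid, ARMV8_PMU_CYCLE_IDX (31, the cycle counter) is always
 * accepted, and any other index (say, 7) takes the UNDEF path above.
 */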
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
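/*
 * For example (illustrative only): SCTLR_EL1 is encoded as Op0=3,
 * Op1=0, CRn=1, CRm=0, Op2=0, so reg_to_encoding() on its descriptor
 * returns the same packed value as sys_reg(3, 0, 1, 0, 0) -- the key
 * the sorted-table lookups below compare against.
 */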
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
static bool trap_ptrauth(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *rd)
{
	kvm_arm_vcpu_ptrauth_trap(vcpu);

	/*
	 * Return false for both cases as we never skip the trapped
	 * instruction:
	 *
	 * - Either we re-execute the same key register access instruction
	 *   after enabling ptrauth.
	 * - Or an UNDEF is injected as ptrauth is not supported/enabled.
	 */
	return false;
}

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
}

#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
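/*
 * For example, PTRAUTH_KEY(APIA) expands to the two descriptors for
 * SYS_APIAKEYLO_EL1 and SYS_APIAKEYHI_EL1, both routed through
 * trap_ptrauth and hidden (from guest and userspace) unless the vcpu
 * has ptrauth enabled.
 */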
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		BUG();
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	} else if (id == SYS_ID_AA64DFR0_EL1) {
		/* Limit guests to PMUv3 for ARMv8.1 */
		val = cpuid_feature_cap_perfmon_field(val,
						ID_AA64DFR0_PMUVER_SHIFT,
						ID_AA64DFR0_PMUVER_8_1);
	} else if (id == SYS_ID_DFR0_EL1) {
		/* Limit guests to PMUv3 for ARMv8.1 */
		val = cpuid_feature_cap_perfmon_field(val,
						ID_DFR0_PERFMON_SHIFT,
						ID_DFR0_PERFMON_8_1);
	}

	return val;
}
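/*
 * For example (illustrative only): on an SVE-capable host, a vcpu
 * created without the SVE feature reads ID_AA64PFR0_EL1.SVE (bits
 * [35:32]) as zero, because the masking above clears the field:
 *
 *	val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);	// SVE_SHIFT == 32
 */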
/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
}

/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_sve(vcpu))
		return 0;

	return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
}
static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *rd)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, rd);

	p->regval = guest_id_aa64zfr0_el1(vcpu);
	return true;
}

static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	u64 val;

	val = guest_id_aa64zfr0_el1(vcpu);
	return reg_to_user(uaddr, &val, reg->id);
}

static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != guest_id_aa64zfr0_el1(vcpu))
		return -EINVAL;

	return 0;
}
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(vcpu, rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, false);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, false);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sysreg(clidr_el1);
	return true;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	/*
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time.
	 * To prevent this trapping from causing performance problems, let's
	 * expose the geometry of all data and unified caches (which are
	 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 */
	if (!(csselr & 1)) // data or unified cache
		p->regval &= ~GENMASK(27, 3);
	return true;
}
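/*
 * Concretely: GENMASK(27, 3) spans the NumSets [27:13] and
 * Associativity [12:3] fields of CCSIDR_EL1, so clearing it makes the
 * cache appear as "1 set, 1 way" while LineSize [2:0] is preserved.
 */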
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, none of the
 * OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_UNALLOCATED(2,7),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),
	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, },
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
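/*
 * For example, PMU_PMEVTYPER(10) expands to the encoding CRn=14
 * (0b1110), CRm=0b1101, Op2=2, i.e. PMEVTYPER10: the low three bits
 * of n select Op2 and the next two bits select CRm.
 */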
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMU */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_encoding(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
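/*
 * Note that bsearch() only works because every table is sorted by
 * encoding (see the "Must be sorted" rule above sys_reg_descs). An
 * illustrative sanity check, assuming a table[] and num in scope:
 *
 *	for (i = 1; i < num; i++)
 *		BUG_ON(reg_to_encoding(&table[i - 1]) >=
 *		       reg_to_encoding(&table[i]));
 */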
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_guest(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
		cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

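/*
 * Illustration (not part of the original file): the ISS field layout
 * decoded above for a mcrr/mrrc trap, as implied by the shifts used in
 * kvm_handle_cp_64():
 *
 *	bit  0       : Direction (0 = write, i.e. mcrr)
 *	bits [4:1]   : CRm
 *	bits [9:5]   : Rt (extracted via kvm_vcpu_sys_get_rt())
 *	bits [14:10] : Rt2
 *	bits [19:16] : Opc1
 */
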
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* CP14 has no target-specific table. */
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num,
				unsigned long *bmap)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset) {
			int reg = table[i].reg;

			table[i].reset(vcpu, &table[i]);
			if (reg > 0 && reg < NR_SYS_REGS)
				set_bit(reg, bmap);
		}
}

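/*
 * Illustrative sketch (not part of the original file): the shape of a
 * ->reset() callback invoked by reset_sys_reg_descs(). A descriptor
 * carrying this callback would have its backing __vcpu_sys_reg() slot
 * set to a fixed value at vcpu reset; the helper name is hypothetical.
 */
static __maybe_unused void example_reset_to_val(struct kvm_vcpu *vcpu,
						const struct sys_reg_desc *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = r->val;
}
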
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

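/*
 * Illustration (not part of the original file): the ISS field layout
 * decoded above for a mrs/msr trap, as implied by the shifts used in
 * kvm_handle_sys_reg():
 *
 *	bit  0       : Direction (0 = write, i.e. msr)
 *	bits [4:1]   : CRm
 *	bits [9:5]   : Rt (extracted via kvm_vcpu_sys_get_rt())
 *	bits [13:10] : CRn
 *	bits [16:14] : Op1
 *	bits [19:17] : Op2
 *	bits [21:20] : Op0
 */
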
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

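/*
 * Worked example (not part of the original file): CSSELR value 1 has InD
 * (bit 0) set and level (bits [3:1]) 0, i.e. it selects the L1
 * instruction cache. is_valid_cache(1) therefore returns true only when
 * Ctype1 in cache_levels reports an instruction cache (1) or separate
 * instruction and data caches (3).
 */
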
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

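/*
 * Illustration (not part of the original file): sys_reg_to_index()
 * produces the id that userspace later feeds back through
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG, and index_to_params() reverses the
 * packing. A minimal userspace-side sketch, kept as a comment since it
 * is not kernel code, assuming the uapi ARM64_SYS_REG() helper and an
 * open vcpu fd; (3, 0, 0, 0, 0) is the MIDR_EL1 encoding:
 *
 *	__u64 val;
 *	struct kvm_one_reg one = {
 *		.id   = ARM64_SYS_REG(3, 0, 0, 0, 0),
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
 */
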
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_from_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(vcpu, i1, &uind, &total);
		else
			err = walk_one_sys_reg(vcpu, i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

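/*
 * Example (not part of the original file): if the target-specific and
 * generic tables both contain an entry for the same encoding, cmp == 0,
 * so the target-specific entry (i1) is emitted and *both* iterators
 * advance, silently skipping the generic duplicate. Passing a NULL uind
 * turns the walk into a pure count, which is how
 * kvm_arm_num_sys_reg_descs() below uses it.
 */
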
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

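/*
 * Worked example (not part of the original file): if CLIDR reports
 * Ctype1 = 3 (separate I/D caches), Ctype2 = 4 (unified) and Ctype3 = 0,
 * the loop above breaks at i = 2, so the mask (1 << 6) - 1 keeps only
 * the six bits describing levels 1 and 2 in cache_levels.
 */
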
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;
	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num, bmap);

	for (num = 1; num < NR_SYS_REGS; num++) {
		if (WARN(!test_bit(num, bmap),
			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
			break;
	}
}