// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}

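/*
 * Worked example (illustrative only, not part of the original source):
 * with CTR_EL0.DminLine == 4, the smallest D-cache line is 2^4 = 16
 * words, i.e. 2^(4 + 2) = 64 bytes, so get_min_cache_line_size(false)
 * returns 6.
 */
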
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of the traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}

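/*
 * Illustrative sketch (not part of the original source): for a 64-byte
 * minimum line, get_min_cache_line_size() returns 6 and the fabricated
 * value encodes CCSIDR_EL1.LineSize = 6 - 4 = 2, with NumSets and
 * Associativity left as 0, i.e. a 1-set, 1-way geometry.
 */
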
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {	/* CP15 */
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

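/*
 * Sketch of the resulting world-switch behaviour (illustrative only; the
 * actual logic lives in the hyp debug save/restore code):
 *
 *	entry:	if (DEBUG_DIRTY || (mdscr & (KDE | MDE)))
 *			disable traps, save host regs, restore guest regs;
 *		else
 *			keep trapping debug accesses;
 *	exit:	if (DEBUG_DIRTY)
 *			save guest regs, restore host regs, clear DEBUG_DIRTY;
 */
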
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

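/*
 * Illustrative example (not part of the original source): for an AArch32
 * view tagged AA32_LO, get_access_mask() yields mask = GENMASK_ULL(31, 0)
 * and shift = 0, so reg_to_dbg() replaces only the low 32 bits of the
 * 64-bit register and dbg_to_reg() returns just that word.
 */
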
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static u64 reset_bvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static u64 reset_bcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static u64 reset_wvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static u64 reset_wcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
	return rd->val;
}

static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}

static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}

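/*
 * Worked example (illustrative only): vcpu_id 21 = 0b10101 gives
 * Aff0 = 21 & 0xf = 5 and Aff1 = (21 >> 4) & 0xff = 1, so together with
 * the RES1 bit 31 the fabricated value is MPIDR_EL1 = 0x80000105.
 */
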
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.pmcr_n;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/* This thing will UNDEF, who cares about the reset value? */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	/*
	 * The value of PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_sys_reg(vcpu, r->reg) = pmcr;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

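/*
 * Decoding example (illustrative only): PMEVCNTR10_EL0 is encoded with
 * CRm = 0b1001 and Op2 = 0b010, so idx = ((9 & 3) << 3) | (2 & 7) = 10.
 */
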
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		/* PMXEVCNTR_EL0 */
		if (r->Op2 == 2) {
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}

static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	bool set;

	val &= kvm_pmu_valid_counter_mask(vcpu);

	switch (r->reg) {
	case PMOVSSET_EL0:
		/* CRm[1] being set indicates a SET register, and CLR otherwise */
		set = r->CRm & 2;
		break;
	default:
		/* Op2[0] being set indicates a SET register, and CLR otherwise */
		set = r->Op2 & 1;
		break;
	}

	if (set)
		__vcpu_sys_reg(vcpu, r->reg) |= val;
	else
		__vcpu_sys_reg(vcpu, r->reg) &= ~val;

	return 0;
}

static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}

static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.pmcr_n = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = val;
	return 0;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}

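/*
 * Illustrative note (not part of the original source): FTR_LOWER_SAFE
 * makes the lower of the two field values the safe one, so a guest
 * PMUVer below the KVM limit is accepted by arm64_check_features(),
 * while a value above the limit is rejected with -E2BIG.
 */
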
/**
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}

static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return IDREG(vcpu->kvm, reg_to_encoding(r));
}

/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
 */
static inline bool is_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) < 8);
}

static inline bool is_aa32_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) <= 3);
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	if (vcpu_has_nv(vcpu))
		access_nested_id_reg(vcpu, p, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	return val;
}

#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)			       \
({									       \
	u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);		       \
	(val) &= ~reg##_##field##_MASK;					       \
	(val) |= FIELD_PREP(reg##_##field##_MASK,			       \
			    min(__f_val, (u64)reg##_##field##_##limit));       \
	(val);								       \
})

static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	return val;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/*
	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
	 * nonzero minimum safe value.
	 */
	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);

	return val;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		IDREG(vcpu->kvm, id) = val;

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, make the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;

	return __vcpu_sys_reg(vcpu, r->reg);
}

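/*
 * Illustrative example (not part of the original source): with IDC=0 and
 * DIC=0, the fabricated CLIDR advertises separate L1 I- and D-caches with
 * LoUU = LoUIS = LoC = 1; with IDC=1 and DIC=0, L1 is an I-cache only and
 * L2 a unified cache, with LoUU = LoUIS = 0 and LoC = 2.
 */
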
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}

/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
 * handling traps. Given that, they are always hidden from userspace.
 */
static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu,
					   const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = hidden_user_visibility,	\
}

/*
 * The reset() callback and the val field are not otherwise used for idregs,
 * so they are repurposed here:
 * reset() returns the KVM sanitised register value, which is the same as the
 * host kernel sanitised value if there is no KVM sanitisation.
 * val is used as a mask of the fields that are writable from userspace;
 * only bits set to 1 are writable. This mask might become unnecessary once
 * all ID registers are enabled as writable from userspace.
 */

#define ID_DESC(name)				\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg			\

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = mask,				\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility,			\
	.reset = kvm_read_sanitised_id_reg,		\
	.val = 0,					\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility,
	  .reset = read_sanitised_id_dfr0_el1,
	  .val = ID_DFR0_EL1_PerfMon_MASK |
		 ID_DFR0_EL1_CopDbg_MASK, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_reg,
	  .reset = read_sanitised_id_aa64pfr0_el1,
	  .val = ~(ID_AA64PFR0_EL1_AMU |
		   ID_AA64PFR0_EL1_MPAM |
		   ID_AA64PFR0_EL1_SVE |
		   ID_AA64PFR0_EL1_RAS |
		   ID_AA64PFR0_EL1_GIC |
		   ID_AA64PFR0_EL1_AdvSIMD |
		   ID_AA64PFR0_EL1_FP), },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_aa64dfr0_el1,
	  .reset = read_sanitised_id_aa64dfr0_el1,
	  .val = ID_AA64DFR0_EL1_PMUVer_MASK |
		 ID_AA64DFR0_EL1_DebugVer_MASK, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
	ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
					ID_AA64ISAR1_EL1_GPA |
					ID_AA64ISAR1_EL1_API |
					ID_AA64ISAR1_EL1_APA)),
	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
					ID_AA64ISAR2_EL1_APA3 |
					ID_AA64ISAR2_EL1_GPA3)),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
					ID_AA64MMFR0_EL1_TGRAN4_2 |
					ID_AA64MMFR0_EL1_TGRAN64_2 |
					ID_AA64MMFR0_EL1_TGRAN16_2)),
	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
					ID_AA64MMFR1_EL1_HCX |
					ID_AA64MMFR1_EL1_XNX |
					ID_AA64MMFR1_EL1_TWED |
					ID_AA64MMFR1_EL1_VH |
					ID_AA64MMFR1_EL1_VMIDBits)),
	ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
					ID_AA64MMFR2_EL1_EVT |
					ID_AA64MMFR2_EL1_FWB |
					ID_AA64MMFR2_EL1_IDS |
					ID_AA64MMFR2_EL1_NV |
					ID_AA64MMFR2_EL1_CCIDX)),
	ID_SANITISED(ID_AA64MMFR3_EL1),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
	{ SYS_DESC(SYS_ELR_EL1), access_elr},

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1,
	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
	{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
	  .set_user = set_clidr },
	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },

2314 { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
2315 .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
2316 { PMU_SYS_REG(PMCNTENSET_EL0),
2317 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2318 .get_user = get_pmreg, .set_user = set_pmreg },
2319 { PMU_SYS_REG(PMCNTENCLR_EL0),
2320 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2321 .get_user = get_pmreg, .set_user = set_pmreg },
2322 { PMU_SYS_REG(PMOVSCLR_EL0),
2323 .access = access_pmovs, .reg = PMOVSSET_EL0,
2324 .get_user = get_pmreg, .set_user = set_pmreg },
2326 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
2327 * previously (and pointlessly) advertised in the past...
2329 { PMU_SYS_REG(PMSWINC_EL0),
2330 .get_user = get_raz_reg, .set_user = set_wi_reg,
2331 .access = access_pmswinc, .reset = NULL },
2332 { PMU_SYS_REG(PMSELR_EL0),
2333 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
2334 { PMU_SYS_REG(PMCEID0_EL0),
2335 .access = access_pmceid, .reset = NULL },
2336 { PMU_SYS_REG(PMCEID1_EL0),
2337 .access = access_pmceid, .reset = NULL },
2338 { PMU_SYS_REG(PMCCNTR_EL0),
2339 .access = access_pmu_evcntr, .reset = reset_unknown,
2340 .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
2341 { PMU_SYS_REG(PMXEVTYPER_EL0),
2342 .access = access_pmu_evtyper, .reset = NULL },
2343 { PMU_SYS_REG(PMXEVCNTR_EL0),
2344 .access = access_pmu_evcntr, .reset = NULL },
2346 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
2347 * in 32bit mode. Here we choose to reset it as zero for consistency.
2349 { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
2350 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
2351 { PMU_SYS_REG(PMOVSSET_EL0),
2352 .access = access_pmovs, .reg = PMOVSSET_EL0,
2353 .get_user = get_pmreg, .set_user = set_pmreg },
2355 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
2356 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
2357 { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
2359 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
2361 { SYS_DESC(SYS_AMCR_EL0), undef_access },
2362 { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
2363 { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
2364 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
2365 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
2366 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
2367 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
2368 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
2369 AMU_AMEVCNTR0_EL0(0),
2370 AMU_AMEVCNTR0_EL0(1),
2371 AMU_AMEVCNTR0_EL0(2),
2372 AMU_AMEVCNTR0_EL0(3),
2373 AMU_AMEVCNTR0_EL0(4),
2374 AMU_AMEVCNTR0_EL0(5),
2375 AMU_AMEVCNTR0_EL0(6),
2376 AMU_AMEVCNTR0_EL0(7),
2377 AMU_AMEVCNTR0_EL0(8),
2378 AMU_AMEVCNTR0_EL0(9),
2379 AMU_AMEVCNTR0_EL0(10),
2380 AMU_AMEVCNTR0_EL0(11),
2381 AMU_AMEVCNTR0_EL0(12),
2382 AMU_AMEVCNTR0_EL0(13),
2383 AMU_AMEVCNTR0_EL0(14),
2384 AMU_AMEVCNTR0_EL0(15),
2385 AMU_AMEVTYPER0_EL0(0),
2386 AMU_AMEVTYPER0_EL0(1),
2387 AMU_AMEVTYPER0_EL0(2),
2388 AMU_AMEVTYPER0_EL0(3),
2389 AMU_AMEVTYPER0_EL0(4),
2390 AMU_AMEVTYPER0_EL0(5),
2391 AMU_AMEVTYPER0_EL0(6),
2392 AMU_AMEVTYPER0_EL0(7),
2393 AMU_AMEVTYPER0_EL0(8),
2394 AMU_AMEVTYPER0_EL0(9),
2395 AMU_AMEVTYPER0_EL0(10),
2396 AMU_AMEVTYPER0_EL0(11),
2397 AMU_AMEVTYPER0_EL0(12),
2398 AMU_AMEVTYPER0_EL0(13),
2399 AMU_AMEVTYPER0_EL0(14),
2400 AMU_AMEVTYPER0_EL0(15),
2401 AMU_AMEVCNTR1_EL0(0),
2402 AMU_AMEVCNTR1_EL0(1),
2403 AMU_AMEVCNTR1_EL0(2),
2404 AMU_AMEVCNTR1_EL0(3),
2405 AMU_AMEVCNTR1_EL0(4),
2406 AMU_AMEVCNTR1_EL0(5),
2407 AMU_AMEVCNTR1_EL0(6),
2408 AMU_AMEVCNTR1_EL0(7),
2409 AMU_AMEVCNTR1_EL0(8),
2410 AMU_AMEVCNTR1_EL0(9),
2411 AMU_AMEVCNTR1_EL0(10),
2412 AMU_AMEVCNTR1_EL0(11),
2413 AMU_AMEVCNTR1_EL0(12),
2414 AMU_AMEVCNTR1_EL0(13),
2415 AMU_AMEVCNTR1_EL0(14),
2416 AMU_AMEVCNTR1_EL0(15),
2417 AMU_AMEVTYPER1_EL0(0),
2418 AMU_AMEVTYPER1_EL0(1),
2419 AMU_AMEVTYPER1_EL0(2),
2420 AMU_AMEVTYPER1_EL0(3),
2421 AMU_AMEVTYPER1_EL0(4),
2422 AMU_AMEVTYPER1_EL0(5),
2423 AMU_AMEVTYPER1_EL0(6),
2424 AMU_AMEVTYPER1_EL0(7),
2425 AMU_AMEVTYPER1_EL0(8),
2426 AMU_AMEVTYPER1_EL0(9),
2427 AMU_AMEVTYPER1_EL0(10),
2428 AMU_AMEVTYPER1_EL0(11),
2429 AMU_AMEVTYPER1_EL0(12),
2430 AMU_AMEVTYPER1_EL0(13),
2431 AMU_AMEVTYPER1_EL0(14),
2432 AMU_AMEVTYPER1_EL0(15),
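	/*
	 * Note (illustrative): like the AMC* entries above, the
	 * AMU_AMEVCNTRx_EL0 and AMU_AMEVTYPERx_EL0 macros are assumed to
	 * expand to undef_access entries; KVM does not expose the
	 * Activity Monitors extension to guests, so all AMU accesses
	 * UNDEF.
	 */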
	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
	EL2_REG(HCR_EL2, access_rw, reset_val, 0),
	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
	EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HFGITR_EL2, access_rw, reset_val, 0),
	EL2_REG(HACR_EL2, access_rw, reset_val, 0),

	EL2_REG(HCRX_EL2, access_rw, reset_val, 0),

	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
	EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
	EL2_REG(VTCR_EL2, access_rw, reset_val, 0),

	{ SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 },
	EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0),
	EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
	EL2_REG(ELR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_SP_EL1), access_sp_el1},

	/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
	{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },
	{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },
	{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },
	{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi,
	  .visibility = hidden_user_visibility },

	{ SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 },
	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
	EL2_REG(ESR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 },

	EL2_REG(FAR_EL2, access_rw, reset_val, 0),
	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),

	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),

	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_RMR_EL2), trap_undef },

	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),

	EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),

	EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
	EL12_REG(CPACR, access_rw, reset_val, 0),
	EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(TCR, access_vm_reg, reset_val, 0),
	{ SYS_DESC(SYS_SPSR_EL12), access_spsr},
	{ SYS_DESC(SYS_ELR_EL12), access_elr},
	EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
	EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
	EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
	EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
	EL12_REG(VBAR, access_rw, reset_val, 0),
	EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
	EL12_REG(CNTKCTL, access_rw, reset_val, 0),

	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};

static const struct sys_reg_desc *first_idreg;
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
		u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);

		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
			     (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
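/*
 * Illustrative map of the DBGDIDR value fabricated above (AArch32 debug
 * register layout): WRPs[31:28], BRPs[27:24], CTX_CMPs[23:20],
 * Version[19:16], bit 15 RES1, and nSUHD_imp[14]/SE_imp[12] both
 * reflecting EL3 presence. Every field is derived from the sanitised
 * AArch64 ID registers, so a 32-bit guest sees a view consistent with
 * ID_AA64DFR0_EL1/ID_AA64PFR0_EL1.
 */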
/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						      \
	/* DBGBVRn */							      \
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
	/* DBGWVRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
	/* DBGWCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							      \
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
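/*
 * For illustration (derived mechanically from the macros above):
 * DBG_BCR_BVR_WCR_WVR(1) expands to the four cp14 entries
 * { CRn(0), CRm(1), Op2(4..7) } wired to trap_bvr/trap_bcr/trap_wvr/
 * trap_wcr with n = 1, i.e. AArch32 DBGBVR1/DBGBCR1/DBGWVR1/DBGWCR1,
 * and DBGBXVR(1) adds the { CRn(1), CRm(1), Op2(1) } entry backing
 * DBGBVR1_EL1[63:32].
 */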
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
	AA32(_map),							\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evtyper }
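/*
 * For illustration (derived mechanically from the macros above):
 * PMU_PMEVCNTR(10) expands to a cp15 entry with CRn = 0b1110 (c14),
 * CRm = 0b1000 | (10 >> 3) = 0b1001 (c9) and Op2 = 10 & 0x7 = 2,
 * i.e. the AArch32 PMEVCNTR10 encoding, handled by access_pmu_evcntr
 * and gated by pmu_visibility.
 */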
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
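	/*
	 * Note (illustrative): the LO/HI markers above route the 32-bit
	 * PMCEID0/PMCEID1 encodings at the low halves, and the
	 * PMCEID2/PMCEID3 encodings (c9, c14, Op2 4/5) at the high
	 * halves, of the 64-bit PMCEID{0,1}_EL0 event maps.
	 */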
	/* PMMIR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch Timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	/* CCSIDR2 */
	{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },

	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *		 call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
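/*
 * Worked example (illustrative): for a guest
 *	mcrr p15, 2, r2, r3, c14	// CNTP_CVAL
 * Rt = r2 and Rt2 = r3, so the write side above builds
 * params.regval = ((u64)r3 << 32) | r2, and a matching mrrc read has
 * the 64-bit result split back into the two GPRs the same way.
 */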
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);

/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
 * to them.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 1;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 2;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}

/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       register ranges.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		/* Put the result into the appropriate register */
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
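/*
 * Note (illustrative): (CRn & 0b1011) == 0b1011 accepts exactly
 * CRn == 0b1011 and CRn == 0b1111, the two CRn values the architecture
 * reserves for IMPLEMENTATION DEFINED registers when Op0 == 3.
 */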
/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}
	return false;
}

static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
{
	const struct sys_reg_desc *idreg = first_idreg;
	u32 id = reg_to_encoding(idreg);
	struct kvm *kvm = vcpu->kvm;

	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	lockdep_assert_held(&kvm->arch.config_lock);

	/* Initialize all idregs */
	while (is_id_reg(id)) {
		IDREG(kvm, id) = idreg->reset(vcpu, idreg);

		idreg++;
		id = reg_to_encoding(idreg);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	kvm_reset_id_regs(vcpu);

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (is_id_reg(reg_to_encoding(r)))
			continue;

		if (r->reset)
			r->reset(vcpu, r);
	}
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	trace_kvm_handle_sys_reg(esr);

	if (__check_nv_sr_forward(vcpu))
		return 1;

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	if (!emulate_sys_reg(vcpu, &params))
		return 1;

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
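/*
 * Worked example (illustrative, assuming the uapi packing of
 * Op0/Op1/CRn/CRm/Op2 at bit offsets 14/11/7/3/0): a KVM_REG_SIZE_U64
 * sysreg index carrying op0=3, op1=0, CRn=0, CRm=0, op2=0 decodes here
 * to MIDR_EL1's encoding. sys_reg_to_index() below is the exact
 * inverse packing.
 */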
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static u64 get_##reg(struct kvm_vcpu *v,			\
			     const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
		return ((struct sys_reg_desc *)r)->val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return ((struct sys_reg_desc *)r)->val;
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

	err = get_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

	return ret;
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	err = set_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
		sys_reg_Op1(r),					\
		sys_reg_CRn(r),					\
		sys_reg_CRm(r),					\
		sys_reg_Op2(r))

static bool is_feature_id_reg(u32 encoding)
{
	return (sys_reg_Op0(encoding) == 3 &&
		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
		sys_reg_CRn(encoding) == 0 &&
		sys_reg_CRm(encoding) <= 7);
}
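/*
 * Note (illustrative): this accepts the Op0=3, CRn=0, CRm=0-7 space for
 * Op1 in {0, 1, 3}, i.e. the AArch32/AArch64 ID registers plus the
 * Op1==1 (CLIDR_EL1 et al.) and Op1==3 (CTR_EL0 et al.) ranges that
 * KVM_ARM_FEATURE_ID_RANGE_IDX() can index.
 */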
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	u64 __user *masks = (u64 __user *)range->addr;

	/* Only feature id range is supported, reserved[13] must be zero. */
	if (range->range ||
	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
		return -EINVAL;

	/* Wipe the whole thing first */
	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
		return -EFAULT;

	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *reg = &sys_reg_descs[i];
		u32 encoding = reg_to_encoding(reg);
		u64 val;

		if (!is_feature_id_reg(encoding) || !reg->set_user)
			continue;

		/*
		 * For ID registers, we return the writable mask. Other feature
		 * registers return a full 64bit mask. That's not necessarily
		 * compliant with a given revision of the architecture, but the
		 * RES0/RES1 definitions allow us to do that.
		 */
		if (is_id_reg(encoding)) {
			if (!reg->val ||
			    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0()))
				continue;
			val = reg->val;
		} else {
			val = GENMASK_ULL(63, 0);
		}

		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
			return -EFAULT;
	}

	return 0;
}

int __init kvm_sys_reg_table_init(void)
{
	struct sys_reg_params params;
	bool valid = true;
	unsigned int i;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
	params = encoding_to_params(SYS_ID_PFR0_EL1);
	first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	if (!first_idreg)
		return -EINVAL;

	if (kvm_get_mode() == KVM_MODE_NV)
		return populate_nv_trap_config();