/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

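/*
 * For reference (architectural layout, not from the original source):
 * CSSELR packs InD in bit 0 (1 = instruction cache) and the cache level
 * in bits [3:1], so e.g. csselr == 3 selects the L2 instruction cache
 * and csselr == 0 the L1 data (or unified) cache.
 */
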
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */

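/*
 * Illustrative walk-through of the scheme above (not from the original
 * source): a guest write to, say, DBGBVR0_EL1 traps into trap_bvr(),
 * which sets KVM_ARM64_DEBUG_DIRTY; on the next guest entry the traps
 * are disabled and the full save/restore dance takes over, until exit
 * clears the dirty bit and hands the registers back to the host.
 */
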
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}

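/*
 * Worked example (illustrative, assuming MPIDR_LEVEL_SHIFT(n) yields
 * 0, 8 and 16): vcpu_id 0x1234 maps to Aff0 = 0x04, Aff1 = 0x23,
 * Aff2 = 0x01, i.e. MPIDR_EL1 = (1ULL << 31) | 0x012304 = 0x80012304.
 */
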
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

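/*
 * Example (illustrative): an EL0 guest read of PMCCNTR_EL0 with both
 * PMUSERENR_EL0.EN and PMUSERENR_EL0.CR clear fails
 * pmu_access_cycle_counter_el0_disabled(), so the guest takes an UNDEF
 * instead of reading the cycle counter.
 */
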
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

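/*
 * Example (illustrative): with PMCR_EL0.N == 4, event counter indices
 * 0..3 are valid and anything from 4 up UNDEFs, except the dedicated
 * cycle counter index ARMV8_PMU_CYCLE_IDX, which is always allowed.
 */
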
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

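/*
 * Index decoding example (illustrative): PMEVCNTR10_EL0 is encoded with
 * CRm == 0b1001 and Op2 == 0b010, so idx = ((1 & 3) << 3) | 2 == 10,
 * matching the PMU_PMEVCNTR_EL0(10) table entry below.
 */
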
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
						    & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	u64 now = kvm_phys_timer_read();

	if (p->is_write)
		ptimer->cnt_cval = p->regval + now;
	else
		p->regval = ptimer->cnt_cval - now;

	return true;
}

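/*
 * Example (illustrative): writing TVAL == 100 arms the comparator at
 * cnt_cval = now + 100 ticks; reading TVAL back a little later returns
 * the (smaller) number of ticks still left until expiry.
 */
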
static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	if (p->is_write) {
		/* ISTATUS bit is read-only */
		ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
	} else {
		u64 now = kvm_phys_timer_read();

		p->regval = ptimer->cnt_ctl;
		/*
		 * Set ISTATUS bit if it's expired.
		 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
		 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
		 * regardless of ENABLE bit for our implementation convenience.
		 */
		if (ptimer->cnt_cval <= now)
			p->regval |= ARCH_TIMER_CTRL_IT_STAT;
	}

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	if (p->is_write)
		ptimer->cnt_cval = p->regval;
	else
		p->regval = ptimer->cnt_cval;

	return true;
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * any of the OSlock protocol. This should be revisited if we ever
 * encounter a more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })

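/*
 * Example of the packed encoding above (a sketch, not from the original
 * source): SCTLR_EL1 is Op0=3, Op1=0, CRn=1, CRm=0, Op2=0, so
 * reg_to_match_value() yields (3 << 14) | (1 << 7) = 0xc080. Keeping the
 * tables sorted ascending by this value is what makes bsearch() work.
 */
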
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

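/*
 * Note (a sketch, assuming the uapi field layout): the userspace index
 * packs the fields at the same bit positions as reg_to_match_value()
 * above (Op0 at bit 14, Op1 at 11, CRn at 7, CRm at 3, Op2 at 0), so
 * decoding is just a mask-and-shift per field.
 */
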
const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
	{ SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
	{ SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
	{ SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
	{ SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
	{ SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
	{ SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
	{ SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
	{ SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
	{ SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
	{ SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
	{ SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
	{ SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
	{ SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

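/*
 * Example (illustrative): if level 1 has ctype 3 (separate I & D),
 * both csselr 0 (L1 data) and csselr 1 (L1 instruction) are valid;
 * for a unified level (ctype 4) only the even, data-side value is.
 */
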
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

2019 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2021 const struct sys_reg_desc *r;
2022 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2024 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2025 return demux_c15_set(reg->id, uaddr);
2027 if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2030 r = index_to_sys_reg_desc(vcpu, reg->id);
2032 return set_invariant_sys_reg(reg->id, uaddr);
2035 return (r->set_user)(vcpu, r, reg, uaddr);
2037 return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

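/*
 * Worked example (illustrative): a CLIDR with Ctype1 = 0b011 (separate
 * I & D) and Ctype2 = 0b100 (unified) leaves cache_levels == 0b100011
 * once the zero Ctype3 terminates the loop and the higher bits are
 * cleared by the mask above.
 */
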
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}