/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
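
/*
 * Illustrative note (not in the original source): CSSELR's bottom bit
 * selects instruction vs. data/unified and the next three bits select
 * the level, so get_ccsidr(0) describes the L1 data/unified cache and
 * get_ccsidr(1) the L1 instruction cache.
 */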

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);

	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
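
/*
 * Illustrative sketch (not in the original source) of the guest-entry
 * decision described above, with debug_is_active() standing in for the
 * DBG_MDSCR_{KDE,MDE} test and the switch/trap helpers being
 * hypothetical names:
 *
 *	if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) {
 *		disable_debug_traps();		-- trapping already paid off
 *		switch_debug_registers();
 *	} else if (debug_is_active(vcpu)) {
 *		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 *		disable_debug_traps();
 *		switch_debug_registers();	-- guest depends on them
 *	} else {
 *		enable_debug_traps();
 *	}
 */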

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
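
/*
 * Worked example (not in the original source): with *dbg_reg ==
 * 0xdeadbeef00000000, a 32-bit guest write of 0x12345678 through
 * reg_to_dbg() leaves 0xdeadbeef12345678 in the register, while a
 * 32-bit read through dbg_to_reg() returns just 0x12345678.
 */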

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
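
/*
 * Worked example (not in the original source): vcpu_id 0x123 maps to
 * Aff0 = 0x3, Aff1 = 0x12, Aff2 = 0x0, i.e. at most 16 VCPUs share an
 * Aff0 group, which is what a single ICC_SGI1R_EL1 write can target.
 */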

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
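
/*
 * Illustrative note (not in the original source): 0xdecafbad is merely
 * an arbitrary bit pattern standing in for an UNKNOWN value; only the
 * bits selected by ARMV8_PMU_PMCR_MASK are taken from it, the host's
 * read-only bits (such as the PMCR.N field) are kept, and PMCR.E is
 * cleared so the guest PMU starts disabled.
 */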

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
		return false;

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
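
/*
 * Worked example (not in the original source): an access to
 * PMEVCNTR19_EL0 traps with CRn == 14, CRm == 0b1010, Op2 == 0b011,
 * so idx = ((0b1010 & 3) << 3) | 3 = 19, which is then bounds-checked
 * against PMCR_EL0.N by pmu_counter_idx_valid().
 */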

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			kvm_pmu_overflow_set(vcpu, p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		mask = kvm_pmu_valid_counter_mask(vcpu);
		kvm_pmu_software_increment(vcpu, p->regval & mask);
		return true;
	}

	return false;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu))
			return false;

		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
						    & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	/* PMEVCNTRn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	/* PMEVTYPERn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
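
/*
 * Illustrative expansion (not in the original source):
 * PMU_PMEVTYPER_EL0(5) expands to
 *
 *	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1100), Op2(0b101),
 *	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + 5), }
 *
 * since 0b1100 | (5 >> 3) == 0b1100 and 5 & 0x7 == 5, which is exactly
 * the encoding access_pmu_evtyper() decodes back into counter index 5.
 */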

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, none of the
 * OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* MDCCSR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  access_pminten, reset_unknown, PMINTENSET_EL1 },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  access_pminten, NULL, PMINTENSET_EL1 },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  access_gic_sre },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  access_pmcr, reset_pmcr, },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  access_pmcnten, NULL, PMCNTENSET_EL0 },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  access_pmovs, NULL, PMOVSSET_EL0 },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  access_pmswinc, reset_unknown, PMSWINC_EL0 },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  access_pmselr, reset_unknown, PMSELR_EL0 },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  access_pmceid },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  access_pmceid },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  access_pmu_evtyper },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  access_pmu_evcntr },
	/* PMUSERENR_EL0
	 * This register resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  access_pmovs, reset_unknown, PMOVSSET_EL0 },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/* PMCCFILTR_EL0
	 * This register resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })
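
/*
 * Worked example (not in the original source): SCTLR_EL1 is Op0 3,
 * Op1 0, CRn 1, CRm 0, Op2 0, so reg_to_match_value() packs it as
 * (3 << 14) | (1 << 7) == 0xc080. Because the fields are packed from
 * Op0 down to Op2, this ordering agrees with cmp_sys_reg() and keeps
 * the sorted tables bsearch()-able.
 */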

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table,
 *               and calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			/* Handled */
			return 0;
		}
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Split up the value between registers for the read side */
	if (!params.is_write) {
		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
}
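
/*
 * Worked example (not in the original source): for a guest
 * "mcrr p15, 0, r0, r1, c2" (a 64-bit TTBR0 write), Rt == 0 holds the
 * low word and Rt2 == 1 the high word, so params.regval is built as
 * (r1 << 32) | (r0 & 0xffffffff) before the table lookup runs.
 */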

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
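
/*
 * Worked example (not in the original source): with cache_levels
 * holding Ctype1 == 3 (separate I and D at L1) and Ctype2 == 4
 * (unified L2), csselr values 0 (L1 D), 1 (L1 I) and 2 (L2 D/unified)
 * are valid, while 3 (L2 I) is rejected because a unified level has
 * no separate instruction cache.
 */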

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
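
/*
 * Worked example (not in the original source): for SCTLR_EL1
 * (Op0 3, Op1 0, CRn 1, CRm 0, Op2 0) this yields the well-known
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG index 0x603000000013c080, i.e.
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG ORed with
 * (3 << 14) | (1 << 7).
 */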

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
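
/*
 * Worked example (not in the original source): if CLIDR_EL1 reports
 * Ctype1 == 3, Ctype2 == 4 and Ctype3 == 0, the loop above stops at
 * i == 2 and the mask keeps only bits [5:0], so cache_levels ends up
 * as 0b100011 and every level from L3 outwards reads as "no cache".
 */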

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}