/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__
#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)
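/*
 * Worked example (not from the original header): with
 * KVM_VCPU_MAX_FEATURES == 7, BIT(7) - 1 == 0x7f, i.e. a mask with the
 * low seven bits set, so feature indices 0..6 are the valid ones.
 */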
#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK
/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
#endif
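/*
 * Usage sketch (illustrative, not part of this header): the mode is
 * selected on the kernel command line, e.g.
 *
 *	kvm-arm.mode=protected
 *
 * after which kvm_get_mode() returns KVM_MODE_PROTECTED once KVM has
 * initialised.
 */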
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int __ro_after_init kvm_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;

		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
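/*
 * Usage sketch (illustrative only, not a kernel call site): a caller
 * that must not allocate in a non-sleeping path tops the cache up
 * first, then hands pages out later with pop_hyp_memcache(). The "4"
 * below is an arbitrary example value, not a kernel constant.
 */
static inline int example_prepare_hyp_memcache(struct kvm_hyp_memcache *mc)
{
	/* Make sure at least four pages are stashed on the free list. */
	return topup_hyp_memcache(mc, 4);
}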
struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/*
	 * VTCR value used on the host. For a non-NV guest (or a NV
	 * guest that runs in a context where its own S2 doesn't
	 * apply), its T0SZ value reflects that of the IPA size.
	 *
	 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
	 * the guest.
	 */
	u64	vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;
#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
	/*
	 * Memory cache used to split
	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
	 * is used to allocate stage2 page tables while splitting huge
	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
	 * influences both the capacity of the split page cache, and
	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
	 * too high.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_page_cache;
	uint64_t split_page_chunk_size;

	struct kvm_arch *arch;
};
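/*
 * Usage sketch (illustrative, userspace side): the split chunk size is
 * configured per VM with KVM_ENABLE_CAP before any vCPU runs, e.g.:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
 *		.args[0] = 2UL * 1024 * 1024,	// split in 2MiB chunks
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * A value of 0 (the default above) disables eager page splitting.
 */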
struct kvm_arch_memory_slot {
};
/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};
typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
};
struct kvm_mpidr_data {
	u64			mpidr_mask;
	DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
};
static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
	unsigned long mask = data->mpidr_mask;
	u64 aff = mpidr & MPIDR_HWID_BITMASK;
	int nbits, bit, bit_idx = 0;
	u16 index = 0;

	/*
	 * If this looks like RISC-V's BEXT or x86's PEXT
	 * instructions, it isn't by accident.
	 */
	nbits = fls(mask);
	for_each_set_bit(bit, &mask, nbits) {
		index |= (aff & BIT(bit)) >> (bit - bit_idx);
		bit_idx++;
	}

	return index;
}
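/*
 * Worked example (not from the original header): with mpidr_mask ==
 * 0x0000ff00 (affinity level 1 only), the eight set mask bits 8..15
 * are compacted into index bits 0..7, so an MPIDR with Aff1 == 0x2a
 * yields index 0x2a. This is exactly the bit-extract that RISC-V's
 * BEXT and x86's PEXT perform in hardware.
 */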
struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;
	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED		3
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		4
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
	/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		7
	unsigned long flags;

	/* VM-wide vCPU feature set */
	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
	/* MPIDR to vcpu index mapping, optional */
	struct kvm_mpidr_data *mpidr_data;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* PMCR_EL0.N value for the guest */
	u8 pmcr_n;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;
	/*
	 * Emulated CPU ID registers per VM
	 * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
	 * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
	 *
	 * These emulated idregs are VM-wide, but accessed from the context
	 * of a vCPU. Atomic access to multiple idregs is guarded by
	 * kvm_arch.config_lock.
	 */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define IDREG(kvm, id)		((kvm)->arch.id_regs[IDREG_IDX(id)])
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to look up
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};
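/*
 * Usage sketch (illustrative only): reading the VM-wide emulated value
 * of ID_AA64PFR0_EL1 from a context holding a struct kvm pointer:
 *
 *	u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
 *
 * IDREG_IDX() maps that register's (3, 0, 0, 4, 0) encoding to a slot
 * in kvm->arch.id_regs[].
 */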
struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};
enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	TCR2_EL1,	/* Extended Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */
	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */
	/* Permission Indirection Extension registers */
	PIR_EL1,	/* Permission Indirection Register 1 (EL1) */
	PIRE0_EL1,	/* Permission Indirection Register 0 (EL1) */

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */
	/* EL2 registers */
	VPIDR_EL2,	/* Virtualization Processor ID Register */
	VMPIDR_EL2,	/* Virtualization Multiprocessor ID Register */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	HCR_EL2,	/* Hypervisor Configuration Register */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HSTR_EL2,	/* Hypervisor System Trap Register */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	HCRX_EL2,	/* Extended Hypervisor Configuration Register */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	VTTBR_EL2,	/* Virtualization Translation Table Base Register */
	VTCR_EL2,	/* Virtualization Translation Control Register */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	TPIDR_EL2,	/* EL2 Software Thread ID Register */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
	SP_EL2,		/* EL2 Stack Pointer */
	HFGRTR_EL2,
	HFGWTR_EL2,
	HFGITR_EL2,
	HDFGRTR_EL2,
	HDFGWTR_EL2,
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	NR_SYS_REGS	/* Nothing after this line! */
};
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};
struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};
struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;
	u32 smccc_version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};
extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
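/*
 * Background note (assumption, based on <asm/kvm_asm.h> rather than
 * this header): kvm_nvhe_sym(sym) expands to the nVHE-prefixed symbol
 * name, so kvm_nvhe_sym(hyp_physvirt_offset) names
 * __kvm_nvhe_hyp_physvirt_offset, and CHOOSE_NVHE_SYM() lets the host
 * transparently refer to the nVHE copy of the variable.
 */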
struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE. These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers. When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;
	u64 svcr;
	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;
	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_state;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;
	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;
	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;
	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
		bool	pstate_ss;
	} guest_debug_preserved;
	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};
/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)
#define __build_check_flag(v, flagset, f, m)			\
	({							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	})

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})
/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif
#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))
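/*
 * Expansion sketch (illustrative only): GUEST_HAS_SVE unpacks to the
 * triplet "cflags, BIT(0), BIT(0)", so
 *
 *	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
 *
 * ends up OR-ing BIT(0) into vcpu->arch.cflags, and
 * vcpu_get_flag(vcpu, GUEST_HAS_SVE) reads it back under the same
 * build-time checks.
 */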
/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
/* vcpu running in HYP context */
#define VCPU_HYP_CONTEXT	__vcpu_single_flag(iflags, BIT(7))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
/* SME enabled for EL0 */
#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
/* WFI instruction trapped */
#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
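/*
 * Worked example (illustrative only): a vCPU with sve_max_vl == 256
 * bytes has vcpu_sve_max_vq() == 256 / 16 == 16 quadwords, and
 * vcpu_sve_state_size() then evaluates SVE_SIG_REGS_SIZE(16), the size
 * of the Z/P/FFR register block for that vector length.
 */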
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
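/*
 * Usage sketch (illustrative only, mirroring what the sys_regs.c
 * accessors do): callers prefer the live CPU copy of a register and
 * fall back to the memory-backed copy when it is not resident:
 *
 *	u64 v;
 *
 *	if (!__vcpu_read_sys_reg_from_cpu(reg, &v))
 *		v = __vcpu_sys_reg(vcpu, reg);
 */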
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
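/*
 * Usage sketch (illustrative only): callers pass the hyp-side function
 * by name, and the macros dispatch either by a direct call (VHE) or by
 * an HVC into the nVHE hypervisor, e.g.:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */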
int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
					struct reg_mask_range *range);
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}
/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
bool kvm_set_pmuserenr(u64 val);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
	return false;
}
#endif

void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
int __init kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)				\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */