/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));
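/*
 * CR0.TS, the only CR0 bit that can be guest-owned, never triggers a PDPTE
 * load.  The assert above guards that invariant: a guest-owned bit that also
 * affected the PDPTRs could be flipped without a VM-Exit, leaving the cached
 * PDPTRs stale.
 */
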
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
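/*
 * For example, BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to kvm_rax_read()
 * and kvm_rax_write().  These touch vcpu->arch.regs[] directly with no
 * availability tracking; note that RSP and RIP, which may live in the
 * VMCS/VMCB, get the cache-aware accessors further down instead.
 */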
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * avail  dirty
 * 0	  0	  register in VMCS/VMCB
 * 0	  1	  *INVALID*
 * 1	  0	  register in vcpu->arch
 * 1	  1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

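/*
 * Marking a register dirty also marks it available; per the table above,
 * dirty-but-unavailable is an invalid state.
 */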
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

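/*
 * A sketch of the consumer side (simplified from the VMX implementation of
 * the kvm_x86_cache_reg hook): the vendor callback reads the value out of
 * hardware state and marks the register available, e.g. for RSP:
 *
 *	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
 *	kvm_register_mark_available(vcpu, VCPU_REGS_RSP);
 */
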
/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other
 * words, odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

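/*
 * The mode-aware wrappers in x86.h, kvm_register_read()/kvm_register_write(),
 * build on the raw helpers and truncate the value to 32 bits outside 64-bit
 * mode; prefer those unless the full 64-bit value is explicitly wanted.
 */
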
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}
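/*
 * kvm_pdptr_read() may need to pull the PDPTEs out of guest memory (notably
 * on SVM), hence the might_sleep() annotation above.
 */
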
static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}
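/*
 * Note that kvm_read_cr0_bits() only refreshes the cache when a requested
 * bit is actually guest-owned; e.g. kvm_read_cr0_bits(vcpu, X86_CR0_PG)
 * never triggers a decache, as CR0.PG cannot change without a VM-Exit.
 */
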
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
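/*
 * kvm_read_edx_eax() serves, e.g., WRMSR emulation, where the guest
 * provides the 64-bit value split across EDX:EAX.
 */
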
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

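	/*
	 * EOI-exitmap updates that arrive while the vCPU is in guest (L2)
	 * mode are deferred (see vcpu_scan_ioapic() in x86.c); apply any
	 * pending update now that L1's state is back in effect.
	 */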
	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif /* ASM_KVM_CACHE_REGS_H */