/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
        VCPU_STAT(hvc_exit_stat),
        VCPU_STAT(wfe_exit_stat),
        VCPU_STAT(wfi_exit_stat),
        VCPU_STAT(mmio_exit_user),
        VCPU_STAT(mmio_exit_kernel),
        VCPU_STAT(exits),
        { NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}

static u64 core_reg_offset_from_id(u64 id)
{
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

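/*
 * Core registers are exposed through the ONE_REG interface as 32-bit
 * words: once KVM_REG_ARM, KVM_REG_SIZE_U32 and KVM_REG_ARM_CORE are
 * stripped off by core_reg_offset_from_id(), the remaining low bits are
 * simply an index into struct kvm_regs.
 */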
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        u32 __user *uaddr = (u32 __user *)(long)reg->addr;
        struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
        u64 off;

        if (KVM_REG_SIZE(reg->id) != 4)
                return -ENOENT;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
                return -ENOENT;

        return put_user(((u32 *)regs)[off], uaddr);
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        u32 __user *uaddr = (u32 __user *)(long)reg->addr;
        struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
        u64 off, val;

        if (KVM_REG_SIZE(reg->id) != 4)
                return -ENOENT;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
                return -ENOENT;

        if (get_user(val, uaddr) != 0)
                return -EFAULT;

        if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
                unsigned long mode = val & MODE_MASK;
                /* Only accept modes the guest is allowed to run in. */
                switch (mode) {
                case USR_MODE:
                case FIQ_MODE:
                case IRQ_MODE:
                case SVC_MODE:
                case ABT_MODE:
                case UND_MODE:
                        break;
                default:
                        return -EINVAL;
                }
        }

        ((u32 *)regs)[off] = val;
        return 0;
}

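/*
 * The generic KVM_GET_REGS/KVM_SET_REGS ioctls are not used on ARM;
 * all register access goes through the KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * interface above instead.
 */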
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

#define NUM_TIMER_REGS 3

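/*
 * The architected timer registers (CTL, CNT, CVAL) are not part of the
 * coproc tables and are handled separately below; NUM_TIMER_REGS must
 * match the number of indices written by copy_timer_indices().
 */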
static bool is_timer_reg(u64 index)
{
        switch (index) {
        case KVM_REG_ARM_TIMER_CTL:
        case KVM_REG_ARM_TIMER_CNT:
        case KVM_REG_ARM_TIMER_CVAL:
                return true;
        }
        return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
                return -EFAULT;

        return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int ret;

        ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
        if (ret != 0)
                return -EFAULT;

        return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        val = kvm_arm_timer_get_reg(vcpu, reg->id);
        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_core_regs(void)
{
        return sizeof(struct kvm_regs) / sizeof(u32);
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
        return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
                + kvm_arm_get_fw_num_regs(vcpu)
                + NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers here, then firmware and timer registers, and
 * finally append the coproc regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
        int ret;

        for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
                if (put_user(core_reg | i, uindices))
                        return -EFAULT;
                uindices++;
        }

        ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
        if (ret)
                return ret;
        uindices += kvm_arm_get_fw_num_regs(vcpu);

        ret = copy_timer_indices(vcpu, uindices);
        if (ret)
                return ret;
        uindices += NUM_TIMER_REGS;

        return kvm_arm_copy_coproc_indices(vcpu, uindices);
}

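/*
 * Userspace discovers the registers above with KVM_GET_REG_LIST and then
 * reads or writes each one through KVM_GET_ONE_REG/KVM_SET_ONE_REG.
 * Roughly (illustrative only, error handling omitted):
 *
 *        struct kvm_one_reg one = {
 *                .id   = reg_id,             // an index returned in the list
 *                .addr = (__u64)(long)&val,  // u32 for core regs, u64 for timer regs
 *        };
 *        ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
 */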
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
                return -EINVAL;

        /* Register group 16 means we want a core register. */
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return get_core_reg(vcpu, reg);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
                return kvm_arm_get_fw_reg(vcpu, reg);

        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);

        return kvm_arm_coproc_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
                return -EINVAL;

        /* Register group 16 means we set a core register. */
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return set_core_reg(vcpu, reg);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
                return kvm_arm_set_fw_reg(vcpu, reg);

        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);

        return kvm_arm_coproc_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);

        return 0;
}

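/*
 * On ARMv7 a pending virtual abort (SError) is signalled purely through
 * HCR.VA; there is no way to specify an ESR payload, so requests with
 * serror_has_esr set are rejected.
 */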
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;

        if (serror_pending && has_esr)
                return -EINVAL;
        else if (serror_pending)
                kvm_inject_vabt(vcpu);

        return 0;
}

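/*
 * Only Cortex-A7 and Cortex-A15 hosts are recognised here; any other
 * CPU part makes kvm_target_cpu() fail.
 */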
int __attribute_const__ kvm_target_cpu(void)
{
        switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A7:
                return KVM_ARM_TARGET_CORTEX_A7;
        case ARM_CPU_PART_CORTEX_A15:
                return KVM_ARM_TARGET_CORTEX_A15;
        default:
                return -EINVAL;
        }
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
        int target = kvm_target_cpu();

        if (target < 0)
                return -ENODEV;

        memset(init, 0, sizeof(*init));

        /*
         * For now, we don't return any features.
         * In future, we might use features to return target
         * specific features available for the preferred
         * target type.
         */
        init->target = (__u32)target;

        return 0;
}

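/*
 * FPU state, address translation and guest debug are not supported
 * through these ioctls on ARM, so they all fail with -EINVAL.
 */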
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

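/*
 * Per-VCPU device attributes: only the KVM_ARM_VCPU_TIMER_CTRL group is
 * handled here; anything else is reported as -ENXIO.
 */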
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_set_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_get_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_has_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}