/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

#include <kvm/arm_psci.h>
/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)    ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
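/*
 * Worked example (assuming MPIDR_LEVEL_BITS == 8, as on arm/arm64):
 *      AFFINITY_MASK(0) == ~0UL        all affinity fields significant
 *      AFFINITY_MASK(1) == ~0xffUL     Aff0 ignored
 *      AFFINITY_MASK(2) == ~0xffffUL   Aff0 and Aff1 ignored
 * psci_affinity_mask() below additionally clamps the result with
 * MPIDR_HWID_BITMASK so that only architecturally valid MPIDR bits remain.
 */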
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 0);
}

static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 1);
}

static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 2);
}

static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 3);
}
static void smccc_set_retval(struct kvm_vcpu *vcpu,
                             unsigned long a0,
                             unsigned long a1,
                             unsigned long a2,
                             unsigned long a3)
{
        vcpu_set_reg(vcpu, 0, a0);
        vcpu_set_reg(vcpu, 1, a1);
        vcpu_set_reg(vcpu, 2, a2);
        vcpu_set_reg(vcpu, 3, a3);
}
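/*
 * Note on the calling convention: per SMCCC, the guest places the function
 * ID in r0/x0 and arguments in r1-r3/x1-x3, and receives results back in
 * r0-r3/x0-x3. The helpers above simply map those registers to and from
 * the vcpu register file.
 */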
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
        if (affinity_level <= 3)
                return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

        /* Affinity levels above 3 are invalid; callers treat 0 as an error */
        return 0;
}
static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
        /*
         * NOTE: For simplicity, we treat VCPU suspend emulation the same
         * as WFI (Wait-for-interrupt) emulation.
         *
         * This means that for KVM the wakeup events are interrupts, which
         * is consistent with the intended use of StateID as described in
         * section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
         *
         * Further, we also treat a power-down request the same as a
         * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
         * specification (ARM DEN 0022A). This means all suspend states
         * for KVM will preserve the register state.
         */
        kvm_vcpu_block(vcpu);
        kvm_clear_request(KVM_REQ_UNHALT, vcpu);

        return PSCI_RET_SUCCESS;
}
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
        struct vcpu_reset_state *reset_state;
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL;
        unsigned long cpu_id;

        cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
        /* A 32-bit caller can only pass a 32-bit MPIDR; drop any upper bits */
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);

        vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

        /*
         * Make sure the caller requested a valid CPU and that the CPU is
         * turned off.
         */
        if (!vcpu)
                return PSCI_RET_INVALID_PARAMS;
        if (!vcpu->arch.power_off) {
                if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
                        return PSCI_RET_ALREADY_ON;
                else
                        return PSCI_RET_INVALID_PARAMS;
        }

        reset_state = &vcpu->arch.reset_state;

        reset_state->pc = smccc_get_arg2(source_vcpu);

        /* Propagate caller endianness */
        reset_state->be = kvm_vcpu_is_be(source_vcpu);

        /*
         * NOTE: We always update r0 (or x0) because for PSCI v0.1
         * the general purpose registers are undefined upon CPU_ON.
         */
        reset_state->r0 = smccc_get_arg3(source_vcpu);

        WRITE_ONCE(reset_state->reset, true);
        kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

        /*
         * Make sure the reset request is observed if the change to
         * power_off is observed.
         */
        smp_wmb();

        vcpu->arch.power_off = false;
        kvm_vcpu_wake_up(vcpu);

        return PSCI_RET_SUCCESS;
}
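/*
 * For reference, a guest brings up a secondary CPU with something like the
 * following (PSCI v0.2+, 64-bit calling convention; illustrative only):
 *
 *      x0 = PSCI_0_2_FN64_CPU_ON
 *      x1 = target MPIDR
 *      x2 = secondary entry point (guest physical address)
 *      x3 = context ID, handed back to the new CPU in x0
 *      hvc #0
 *
 * which is why arg2 and arg3 above end up in reset_state->pc and
 * reset_state->r0 respectively.
 */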
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
        int i, matching_cpus = 0;
        unsigned long mpidr;
        unsigned long target_affinity;
        unsigned long target_affinity_mask;
        unsigned long lowest_affinity_level;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        target_affinity = smccc_get_arg1(vcpu);
        lowest_affinity_level = smccc_get_arg2(vcpu);

        /* Determine target affinity mask */
        target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
        if (!target_affinity_mask)
                return PSCI_RET_INVALID_PARAMS;

        /* Ignore other bits of target affinity */
        target_affinity &= target_affinity_mask;

        /*
         * If one or more VCPUs matching the target affinity are running,
         * report ON, otherwise OFF.
         */
        kvm_for_each_vcpu(i, tmp, kvm) {
                mpidr = kvm_vcpu_get_mpidr_aff(tmp);
                if ((mpidr & target_affinity_mask) == target_affinity) {
                        matching_cpus++;
                        if (!tmp->arch.power_off)
                                return PSCI_0_2_AFFINITY_LEVEL_ON;
                }
        }

        if (!matching_cpus)
                return PSCI_RET_INVALID_PARAMS;

        return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
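/*
 * Worked example: AFFINITY_INFO(target_affinity = 0x0100,
 * lowest_affinity_level = 1) asks about the whole Aff1 == 1 cluster. The
 * mask becomes ~0xff, so every VCPU whose MPIDR has Aff1 == 1 (and
 * Aff2/Aff3 == 0) is considered: the call reports ON if any of them is
 * powered on, OFF if all are off, and INVALID_PARAMS if none exist.
 */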
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
        int i;
        struct kvm_vcpu *tmp;

        /*
         * The KVM ABI specifies that a system event exit may call KVM_RUN
         * again and may perform shutdown/reboot at a later time than when
         * the actual request is made. Since we are implementing PSCI and a
         * caller of PSCI reboot and shutdown expects that the system shuts
         * down or reboots immediately, let's make sure that VCPUs are not
         * run after this call is handled and before the VCPUs have been
         * re-initialized.
         */
        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
        vcpu->run->system_event.type = type;
        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
                 * Bits[31:16] = Major Version = 0
                 * Bits[15:0]  = Minor Version = 2
                 */
                val = KVM_ARM_PSCI_0_2;
                break;
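        /*
         * For illustration (assuming the PSCI_VERSION() encoding from
         * include/uapi/linux/psci.h): KVM_ARM_PSCI_0_2 == 0x00000002 and
         * KVM_ARM_PSCI_1_0 == 0x00010000, i.e. the major version sits in
         * the upper half-word and the minor version in the lower half-word.
         */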
        case PSCI_0_2_FN_CPU_SUSPEND:
        case PSCI_0_2_FN64_CPU_SUSPEND:
                val = kvm_psci_vcpu_suspend(vcpu);
                break;
        case PSCI_0_2_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
        case PSCI_0_2_FN64_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
        case PSCI_0_2_FN64_AFFINITY_INFO:
                val = kvm_psci_vcpu_affinity_info(vcpu);
                break;
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                /*
                 * Trusted OS is MP and hence does not require migration,
                 * or no Trusted OS is present.
                 */
                val = PSCI_0_2_TOS_MP;
                break;
        case PSCI_0_2_FN_SYSTEM_OFF:
                kvm_psci_system_off(vcpu);
                /*
                 * We shouldn't be going back to the guest VCPU after
                 * receiving a SYSTEM_OFF request.
                 *
                 * If user space accidentally or deliberately resumes the
                 * guest VCPU after a SYSTEM_OFF request, the guest VCPU
                 * should see an internal failure from the PSCI return
                 * value. To achieve this, we preload r0 (or x0) with the
                 * PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        case PSCI_0_2_FN_SYSTEM_RESET:
                kvm_psci_system_reset(vcpu);
                /*
                 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
                 * with PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}
static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
        u32 psci_fn = smccc_get_function(vcpu);
        u32 feature;
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = KVM_ARM_PSCI_1_0;
                break;
        case PSCI_1_0_FN_PSCI_FEATURES:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case PSCI_0_2_FN_PSCI_VERSION:
                case PSCI_0_2_FN_CPU_SUSPEND:
                case PSCI_0_2_FN64_CPU_SUSPEND:
                case PSCI_0_2_FN_CPU_OFF:
                case PSCI_0_2_FN_CPU_ON:
                case PSCI_0_2_FN64_CPU_ON:
                case PSCI_0_2_FN_AFFINITY_INFO:
                case PSCI_0_2_FN64_AFFINITY_INFO:
                case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                case PSCI_0_2_FN_SYSTEM_OFF:
                case PSCI_0_2_FN_SYSTEM_RESET:
                case PSCI_1_0_FN_PSCI_FEATURES:
                case ARM_SMCCC_VERSION_FUNC_ID:
                        val = 0;
                        break;
                default:
                        val = PSCI_RET_NOT_SUPPORTED;
                        break;
                }
                break;
        default:
                return kvm_psci_0_2_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;

        switch (psci_fn) {
        case KVM_PSCI_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}
/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
        switch (kvm_psci_version(vcpu, vcpu->kvm)) {
        case KVM_ARM_PSCI_1_0:
                return kvm_psci_1_0_call(vcpu);
        case KVM_ARM_PSCI_0_2:
                return kvm_psci_0_2_call(vcpu);
        case KVM_ARM_PSCI_0_1:
                return kvm_psci_0_1_call(vcpu);
        default:
                return -EINVAL;
        }
}
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
        u32 func_id = smccc_get_function(vcpu);
        u32 val = SMCCC_RET_NOT_SUPPORTED;
        u32 feature;

        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        if (kvm_arm_harden_branch_predictor())
                                val = SMCCC_RET_SUCCESS;
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
                        switch (kvm_arm_have_ssbd()) {
                        case KVM_SSBD_FORCE_DISABLE:
                        case KVM_SSBD_UNKNOWN:
                                break;
                        case KVM_SSBD_KERNEL:
                                val = SMCCC_RET_SUCCESS;
                                break;
                        case KVM_SSBD_FORCE_ENABLE:
                        case KVM_SSBD_MITIGATED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                }
                break;
        default:
                return kvm_psci_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}
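/*
 * Discovery flow (SMCCC v1.1, for illustration): a guest first calls
 * ARM_SMCCC_ARCH_FEATURES_FUNC_ID with the workaround ID in arg1. A
 * SMCCC_RET_SUCCESS reply means the workaround is implemented and should
 * be invoked, SMCCC_RET_NOT_REQUIRED means this CPU does not need it, and
 * SMCCC_RET_NOT_SUPPORTED means it is unavailable.
 */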
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
        return 1;               /* PSCI version */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
                return -EFAULT;
        return 0;
}
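/*
 * Illustrative note: user space reads and writes the register above with
 * the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, passing
 * KVM_REG_ARM_PSCI_VERSION as the register id. Writing KVM_ARM_PSCI_0_1
 * is only accepted when the VCPU was created without the
 * KVM_ARM_VCPU_PSCI_0_2 feature, as checked in kvm_arm_set_fw_reg() below.
 */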
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
                void __user *uaddr = (void __user *)(long)reg->addr;
                u64 val;

                val = kvm_psci_version(vcpu, vcpu->kvm);
                if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
                        return -EFAULT;

                return 0;
        }

        return -EINVAL;
}
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
                void __user *uaddr = (void __user *)(long)reg->addr;
                bool wants_02;
                u64 val;

                if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                        return -EFAULT;

                wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

                switch (val) {
                case KVM_ARM_PSCI_0_1:
                        if (wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                case KVM_ARM_PSCI_0_2:
                case KVM_ARM_PSCI_1_0:
                        if (!wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;