GNU Linux-libre 4.9.301-gnu1
arch/arm/kvm/psci.c
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

#include <kvm/arm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

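/*
 * AFFINITY_MASK(level) keeps the MPIDR affinity fields at and above the
 * given level and clears everything below it. For example, with the
 * 8-bit affinity fields of the MPIDR (MPIDR_LEVEL_BITS == 8),
 * AFFINITY_MASK(1) is ~0xff, which discards Aff0 and keeps Aff1 and up.
 */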
#define AFFINITY_MASK(level)    ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

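/*
 * Accessors for the SMC Calling Convention: the function ID arrives in
 * r0 (w0 on arm64), arguments in r1-r3, and results are returned in
 * r0-r3 of the calling vcpu.
 */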
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 0);
}

static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 1);
}

static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 2);
}

static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 3);
}

static void smccc_set_retval(struct kvm_vcpu *vcpu,
                             unsigned long a0,
                             unsigned long a1,
                             unsigned long a2,
                             unsigned long a3)
{
        vcpu_set_reg(vcpu, 0, a0);
        vcpu_set_reg(vcpu, 1, a1);
        vcpu_set_reg(vcpu, 2, a2);
        vcpu_set_reg(vcpu, 3, a3);
}

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
        if (affinity_level <= 3)
                return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

        return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
        /*
         * NOTE: For simplicity, we make VCPU suspend emulation the
         * same as WFI (Wait For Interrupt) emulation.
         *
         * This means that for KVM the wakeup events are interrupts and
         * this is consistent with the intended use of StateID as
         * described in section 5.4.1 of the PSCI v0.2 specification
         * (ARM DEN 0022A).
         *
         * Further, we also treat a power-down request the same as a
         * stand-by request, as per section 5.4.2 clause 3 of the PSCI
         * v0.2 specification (ARM DEN 0022A). This means all suspend
         * states for KVM will preserve the register state.
         */
        kvm_vcpu_block(vcpu);

        return PSCI_RET_SUCCESS;
}

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL;
        struct swait_queue_head *wq;
        unsigned long cpu_id;
        unsigned long context_id;
        phys_addr_t target_pc;

        cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
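        /*
         * An AArch32 caller can only provide a 32-bit MPIDR, so
         * truncate the target to the low 32 bits (a no-op when
         * unsigned long is itself 32 bits wide).
         */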
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);

        vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

        /*
         * Make sure the caller requested a valid CPU and that the CPU is
         * turned off.
         */
        if (!vcpu)
                return PSCI_RET_INVALID_PARAMS;
        if (!vcpu->arch.power_off) {
                if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
                        return PSCI_RET_ALREADY_ON;
                else
                        return PSCI_RET_INVALID_PARAMS;
        }

        target_pc = smccc_get_arg2(source_vcpu);
        context_id = smccc_get_arg3(source_vcpu);

        kvm_reset_vcpu(vcpu);

        /* Gracefully handle Thumb2 entry point */
        if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
                target_pc &= ~((phys_addr_t) 1);
                vcpu_set_thumb(vcpu);
        }

        /* Propagate caller endianness */
        if (kvm_vcpu_is_be(source_vcpu))
                kvm_vcpu_set_be(vcpu);

        *vcpu_pc(vcpu) = target_pc;
        /*
         * NOTE: We always update r0 (or x0) because for PSCI v0.1
         * the general purpose registers are undefined upon CPU_ON.
         */
        smccc_set_retval(vcpu, context_id, 0, 0, 0);
        vcpu->arch.power_off = false;
        smp_mb();               /* Make sure the above is visible */

        wq = kvm_arch_vcpu_wq(vcpu);
        swake_up(wq);

        return PSCI_RET_SUCCESS;
}

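/*
 * AFFINITY_INFO example: a guest probing cluster 1 at affinity level 1
 * passes target_affinity = 0x100 and lowest_affinity_level = 1; the
 * query reports ON if any powered-on VCPU has an MPIDR matching 0x1xx,
 * OFF if all matching VCPUs are powered off, and INVALID_PARAMS if no
 * VCPU matches at all.
 */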
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
        int i, matching_cpus = 0;
        unsigned long mpidr;
        unsigned long target_affinity;
        unsigned long target_affinity_mask;
        unsigned long lowest_affinity_level;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        target_affinity = smccc_get_arg1(vcpu);
        lowest_affinity_level = smccc_get_arg2(vcpu);

        /* Determine target affinity mask */
        target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
        if (!target_affinity_mask)
                return PSCI_RET_INVALID_PARAMS;

        /* Ignore other bits of target affinity */
        target_affinity &= target_affinity_mask;

        /*
         * If one or more VCPUs matching the target affinity are
         * running, report ON; otherwise report OFF.
         */
        kvm_for_each_vcpu(i, tmp, kvm) {
                mpidr = kvm_vcpu_get_mpidr_aff(tmp);
                if ((mpidr & target_affinity_mask) == target_affinity) {
                        matching_cpus++;
                        if (!tmp->arch.power_off)
                                return PSCI_0_2_AFFINITY_LEVEL_ON;
                }
        }

        if (!matching_cpus)
                return PSCI_RET_INVALID_PARAMS;

        return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
        int i;
        struct kvm_vcpu *tmp;

        /*
         * The KVM ABI specifies that a system event exit may call KVM_RUN
         * again and may perform shutdown/reboot at a later time than when
         * the actual request is made.  Since we are implementing PSCI, and
         * a caller of PSCI reboot or shutdown expects the system to shut
         * down or reboot immediately, make sure that VCPUs are not run
         * after this call is handled and before the VCPUs have been
         * re-initialized.
         */
        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                tmp->arch.power_off = true;
                kvm_vcpu_kick(tmp);
        }

        memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
        vcpu->run->system_event.type = type;
        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

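/*
 * The kvm_psci_*_call() handlers below return 1 when the call has been
 * handled and the guest can be resumed, or 0 when user space must
 * complete the request (SYSTEM_OFF and SYSTEM_RESET take this path via
 * a KVM_EXIT_SYSTEM_EVENT exit).
 */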
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long psci_fn = smccc_get_function(vcpu);
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
                 * Bits[31:16] = Major Version = 0
                 * Bits[15:0] = Minor Version = 2
                 */
                val = KVM_ARM_PSCI_0_2;
                break;
        case PSCI_0_2_FN_CPU_SUSPEND:
        case PSCI_0_2_FN64_CPU_SUSPEND:
                val = kvm_psci_vcpu_suspend(vcpu);
                break;
        case PSCI_0_2_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
        case PSCI_0_2_FN64_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
        case PSCI_0_2_FN64_AFFINITY_INFO:
                val = kvm_psci_vcpu_affinity_info(vcpu);
                break;
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                /*
                 * Either the Trusted OS is multicore-capable and hence
                 * does not require migration, or no Trusted OS is
                 * present at all.
                 */
                val = PSCI_0_2_TOS_MP;
                break;
        case PSCI_0_2_FN_SYSTEM_OFF:
                kvm_psci_system_off(vcpu);
                /*
                 * We shouldn't be going back to the guest VCPU after
                 * receiving a SYSTEM_OFF request.
                 *
                 * If user space accidentally or deliberately resumes
                 * the guest VCPU after a SYSTEM_OFF request, the guest
                 * VCPU should see an internal failure in the PSCI
                 * return value. To achieve this, we preload r0 (or x0)
                 * with the PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        case PSCI_0_2_FN_SYSTEM_RESET:
                kvm_psci_system_reset(vcpu);
                /*
                 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
                 * with PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}

static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
        u32 psci_fn = smccc_get_function(vcpu);
        u32 feature;
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = KVM_ARM_PSCI_1_0;
                break;
        case PSCI_1_0_FN_PSCI_FEATURES:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case PSCI_0_2_FN_PSCI_VERSION:
                case PSCI_0_2_FN_CPU_SUSPEND:
                case PSCI_0_2_FN64_CPU_SUSPEND:
                case PSCI_0_2_FN_CPU_OFF:
                case PSCI_0_2_FN_CPU_ON:
                case PSCI_0_2_FN64_CPU_ON:
                case PSCI_0_2_FN_AFFINITY_INFO:
                case PSCI_0_2_FN64_AFFINITY_INFO:
                case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                case PSCI_0_2_FN_SYSTEM_OFF:
                case PSCI_0_2_FN_SYSTEM_RESET:
                case PSCI_1_0_FN_PSCI_FEATURES:
                case ARM_SMCCC_VERSION_FUNC_ID:
                        val = 0;
                        break;
                default:
                        val = PSCI_RET_NOT_SUPPORTED;
                        break;
                }
                break;
        default:
                return kvm_psci_0_2_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}

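/*
 * PSCI v0.1 predates the standard function numbering, so KVM uses its
 * own KVM_PSCI_FN_* IDs here and only implements CPU_OFF and CPU_ON.
 */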
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long psci_fn = smccc_get_function(vcpu);
        unsigned long val;

        switch (psci_fn) {
        case KVM_PSCI_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
        switch (kvm_psci_version(vcpu, vcpu->kvm)) {
        case KVM_ARM_PSCI_1_0:
                return kvm_psci_1_0_call(vcpu);
        case KVM_ARM_PSCI_0_2:
                return kvm_psci_0_2_call(vcpu);
        case KVM_ARM_PSCI_0_1:
                return kvm_psci_0_1_call(vcpu);
        default:
                return -EINVAL;
        }
}

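/*
 * Entry point for guest hypercalls. Besides PSCI, this also answers
 * the SMCCC v1.1 discovery calls used for CPU-bug mitigations:
 * ARM_SMCCC_ARCH_WORKAROUND_1 (branch predictor hardening, i.e.
 * Spectre v2) and ARM_SMCCC_ARCH_WORKAROUND_2 (Speculative Store
 * Bypass). Returns 1 to resume the guest, 0 to exit to user space,
 * or a negative error code.
 */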
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
        u32 func_id = smccc_get_function(vcpu);
        u32 val = SMCCC_RET_NOT_SUPPORTED;
        u32 feature;

        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        if (kvm_arm_harden_branch_predictor())
                                val = SMCCC_RET_SUCCESS;
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
                        switch (kvm_arm_have_ssbd()) {
                        case KVM_SSBD_FORCE_DISABLE:
                        case KVM_SSBD_UNKNOWN:
                                break;
                        case KVM_SSBD_KERNEL:
                                val = SMCCC_RET_SUCCESS;
                                break;
                        case KVM_SSBD_FORCE_ENABLE:
                        case KVM_SSBD_MITIGATED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                }
                break;
        default:
                return kvm_psci_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}

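/*
 * Firmware pseudo-register interface: KVM_REG_ARM_PSCI_VERSION lets
 * user space read and, within the limits checked below, override the
 * PSCI version exposed to the guest, so that the value can be
 * preserved across migration.
 */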
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
        return 1;               /* PSCI version */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
                return -EFAULT;

        return 0;
}

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
                void __user *uaddr = (void __user *)(long)reg->addr;
                u64 val;

                val = kvm_psci_version(vcpu, vcpu->kvm);
                if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
                        return -EFAULT;

                return 0;
        }

        return -EINVAL;
}

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
                void __user *uaddr = (void __user *)(long)reg->addr;
                bool wants_02;
                u64 val;

                if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                        return -EFAULT;

                wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

                switch (val) {
                case KVM_ARM_PSCI_0_1:
                        if (wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                case KVM_ARM_PSCI_0_2:
                case KVM_ARM_PSCI_1_0:
                        if (!wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                }
        }

        return -EINVAL;
}