arch/riscv/kvm/vcpu_sbi.c (GNU Linux-libre 6.8.7-gnu, releases.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

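/*
 * When an extension is compiled out, fall back to a stub with a NULL
 * handler and an extension-id range no defined SBI extension uses, so
 * that the sbi_ext[] table below stays fully populated and calls to
 * such extensions fall through to SBI_ERR_NOT_SUPPORTED.
 */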
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

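/*
 * Each entry pairs the userspace-visible extension index
 * (KVM_RISCV_SBI_EXT_*) with its in-kernel implementation. Entries
 * that use KVM_RISCV_SBI_EXT_MAX as their index (currently only the
 * base extension) cannot be toggled through the ONE_REG interface.
 */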
struct kvm_riscv_sbi_extension_entry {
        enum KVM_RISCV_SBI_EXT_ID ext_idx;
        const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        {
                .ext_idx = KVM_RISCV_SBI_EXT_V01,
                .ext_ptr = &vcpu_sbi_ext_v01,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
                .ext_ptr = &vcpu_sbi_ext_base,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_TIME,
                .ext_ptr = &vcpu_sbi_ext_time,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_IPI,
                .ext_ptr = &vcpu_sbi_ext_ipi,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
                .ext_ptr = &vcpu_sbi_ext_rfence,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SRST,
                .ext_ptr = &vcpu_sbi_ext_srst,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_HSM,
                .ext_ptr = &vcpu_sbi_ext_hsm,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_PMU,
                .ext_ptr = &vcpu_sbi_ext_pmu,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_DBCN,
                .ext_ptr = &vcpu_sbi_ext_dbcn,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_STA,
                .ext_ptr = &vcpu_sbi_ext_sta,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
                .ext_ptr = &vcpu_sbi_ext_vendor,
        },
};

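/*
 * Linearly scan sbi_ext[] for the entry matching a userspace extension
 * index; returns NULL for out-of-range or unregistered indices.
 */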
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;

        if (idx >= KVM_RISCV_SBI_EXT_MAX)
                return NULL;

        for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == idx) {
                        sext = &sbi_ext[i];
                        break;
                }
        }

        return sext;
}

bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

        return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

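/*
 * Forward an SBI call to userspace via a KVM_EXIT_RISCV_SBI exit. The
 * current a0/a1 values are mirrored into ret[] so userspace can leave
 * them untouched; kvm_riscv_vcpu_sbi_return() copies them back in.
 */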
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = cp->a0;
        run->riscv_sbi.ret[1] = cp->a1;
}

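/*
 * Power off every vCPU of the VM and report a system-level event
 * (shutdown or reset) of the given type and reason to userspace.
 */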
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

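/*
 * Complete an SBI call previously forwarded to userspace: copy the
 * return values into a0/a1 and step sepc past the 4-byte ecall.
 */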
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}

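/*
 * ONE_REG backend for KVM_REG_RISCV_SBI_SINGLE: enable or disable a
 * single extension. Extensions the host cannot provide stay
 * unavailable and report -ENOENT.
 */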
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        if (reg_val != 1 && reg_val != 0)
                return -EINVAL;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        scontext->ext_status[sext->ext_idx] = (reg_val) ?
                        KVM_RISCV_SBI_EXT_STATUS_ENABLED :
                        KVM_RISCV_SBI_EXT_STATUS_DISABLED;

        return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        *reg_val = scontext->ext_status[sext->ext_idx] ==
                                KVM_RISCV_SBI_EXT_STATUS_ENABLED;

        return 0;
}

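/*
 * ONE_REG backend for the MULTI_EN/MULTI_DIS registers: each register
 * is a bitmask covering BITS_PER_LONG consecutive extension indices,
 * so register reg_num starts at extension id reg_num * BITS_PER_LONG.
 */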
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
        }

        return 0;
}

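/*
 * Decode a KVM_SET_ONE_REG write in the SBI extension register space;
 * toggling is refused with -EBUSY once the vCPU has run. A minimal
 * userspace sketch (illustrative only, rv64 uapi macros from
 * <linux/kvm.h> and <asm/kvm.h>, error handling omitted) that enables
 * the TIME extension before the first KVM_RUN:
 *
 *        unsigned long val = 1;
 *        struct kvm_one_reg reg = {
 *                .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *                      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *                      KVM_RISCV_SBI_EXT_TIME,
 *                .addr = (unsigned long)&val,
 *        };
 *        ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */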
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (vcpu->arch.ran_atleast_once)
                return -EBUSY;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_SBI_MULTI_EN:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_SBI_MULTI_EN:
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -ENOENT;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

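/*
 * The KVM_REG_RISCV_SBI_STATE space carries per-vCPU SBI state rather
 * than enable bits; only the STA (steal-time) subtype is handled here,
 * and unknown subtypes report -EINVAL.
 */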
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
        default:
                return -EINVAL;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;
        int ret;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
                break;
        default:
                return -EINVAL;
        }

        if (ret)
                return ret;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

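/*
 * Map a raw SBI extension id (taken from a7) to its handler. Entries
 * indexed at or above KVM_RISCV_SBI_EXT_MAX (the base extension) skip
 * the status check because they can never be disabled.
 */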
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                                return ext;

                        return NULL;
                }
        }

        return NULL;
}

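/*
 * Top-level SBI dispatcher, invoked when the guest executes an ecall
 * from VS-mode: a7 selects the extension and a6 the function within
 * it. Returns > 0 to continue the vcpu-run loop, 0 to exit to
 * userspace, and < 0 to propagate an error.
 */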
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = {0};
        struct kvm_vcpu_sbi_return sbi_ret = {
                .out_val = 0,
                .err_val = 0,
                .utrap = &utrap,
        };
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &sbi_ret);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /*
         * A Linux error code returned by the SBI extension handler exits the
         * ioctl loop and is forwarded to userspace.
         */
        if (ret < 0) {
                next_sepc = false;
                goto ecall_done;
        }

        /* Handle special error cases, i.e., trap, exit, or userspace forward */
        if (sbi_ret.utrap->scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                sbi_ret.utrap->sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (sbi_ret.uexit) {
                next_sepc = false;
                ret = 0;
        } else {
                cp->a0 = sbi_ret.err_val;
                ret = 1;
        }
ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        /* a1 should only be updated when we continue the ioctl loop */
        if (!ext_is_v01 && ret == 1)
                cp->a1 = sbi_ret.out_val;

        return ret;
}

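/*
 * Called when the vCPU is created: probe each registered extension and
 * record whether it is unavailable, disabled by default, or enabled.
 */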
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->probe && !ext->probe(vcpu)) {
                        scontext->ext_status[entry->ext_idx] =
                                KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
                        continue;
                }

                scontext->ext_status[entry->ext_idx] = ext->default_disabled ?
                                        KVM_RISCV_SBI_EXT_STATUS_DISABLED :
                                        KVM_RISCV_SBI_EXT_STATUS_ENABLED;
        }
}