// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

/* Stubs so the sbi_ext[] table below links even when V01/PMU support is off. */
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL, .extid_end = -1UL, .handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
        .extid_start = -1UL, .extid_end = -1UL, .handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
        enum KVM_RISCV_SBI_EXT_ID ext_idx;
        const struct kvm_vcpu_sbi_extension *ext_ptr;
};

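/*
 * Registry of all SBI extensions known to KVM. Each entry maps a
 * user-visible KVM_RISCV_SBI_EXT_* ID onto its in-kernel implementation.
 * Entries registered with KVM_RISCV_SBI_EXT_MAX (such as the base
 * extension) cannot be disabled from userspace.
 */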
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        {
                .ext_idx = KVM_RISCV_SBI_EXT_V01,
                .ext_ptr = &vcpu_sbi_ext_v01,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
                .ext_ptr = &vcpu_sbi_ext_base,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_TIME,
                .ext_ptr = &vcpu_sbi_ext_time,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_IPI,
                .ext_ptr = &vcpu_sbi_ext_ipi,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
                .ext_ptr = &vcpu_sbi_ext_rfence,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SRST,
                .ext_ptr = &vcpu_sbi_ext_srst,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_HSM,
                .ext_ptr = &vcpu_sbi_ext_hsm,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_PMU,
                .ext_ptr = &vcpu_sbi_ext_pmu,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_DBCN,
                .ext_ptr = &vcpu_sbi_ext_dbcn,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_STA,
                .ext_ptr = &vcpu_sbi_ext_sta,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
                .ext_ptr = &vcpu_sbi_ext_vendor,
        },
};

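/* Look up a registry entry by its KVM_RISCV_SBI_EXT_* ID; NULL if out of range or not registered. */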
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;

        if (idx >= KVM_RISCV_SBI_EXT_MAX)
                return NULL;

        for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == idx) {
                        sext = &sbi_ext[i];
                        break;
                }
        }

        return sext;
}

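/* An extension is "supported" if it is registered and probing did not mark it unavailable. */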
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

        return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

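/*
 * Forward an SBI call to userspace: copy the ecall's a0-a7 register state
 * into kvm_run and exit with KVM_EXIT_RISCV_SBI so the VMM can emulate the
 * call. The completion path is kvm_riscv_vcpu_sbi_return() below.
 */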
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = cp->a0;
        run->riscv_sbi.ret[1] = cp->a1;
}

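/*
 * Power off every vCPU of the VM and report a system-level event of the
 * given type to userspace via KVM_EXIT_SYSTEM_EVENT, passing the SBI
 * reason code as event data.
 */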
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

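/* Complete an SBI call previously forwarded to userspace: write back a0/a1 and step over the ecall. */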
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}

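/* Enable or disable a single SBI extension identified by its KVM_RISCV_SBI_EXT_* register number. */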
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        if (reg_val != 1 && reg_val != 0)
                return -EINVAL;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        scontext->ext_status[sext->ext_idx] = (reg_val) ?
                        KVM_RISCV_SBI_EXT_STATUS_ENABLED :
                        KVM_RISCV_SBI_EXT_STATUS_DISABLED;

        return 0;
}

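/* Report whether a single SBI extension is currently enabled (1) or disabled (0). */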
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        *reg_val = scontext->ext_status[sext->ext_idx] ==
                        KVM_RISCV_SBI_EXT_STATUS_ENABLED;

        return 0;
}

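/* Apply one bit-mask register worth of enables or disables, one bit per extension ID. */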
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}

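/* Build the bit-mask view of enabled extensions covered by one multi-register. */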
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
        }

        return 0;
}

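/*
 * KVM_SET_ONE_REG handler for the KVM_REG_RISCV_SBI_EXT register class.
 * Only permitted before the vCPU has run at least once.
 *
 * Illustrative userspace sketch (assumes RV64, hence KVM_REG_SIZE_U64;
 * not taken from this file):
 *
 *      uint64_t val = 0;       (0 = disable, 1 = enable)
 *      struct kvm_one_reg reg = {
 *              .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *                    KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *                    KVM_RISCV_SBI_EXT_PMU,
 *              .addr = (unsigned long)&val,
 *      };
 *      ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */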
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (vcpu->arch.ran_atleast_once)
                return -EBUSY;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_SBI_MULTI_EN:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
        }

        return 0;
}

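/*
 * KVM_GET_ONE_REG handler for the KVM_REG_RISCV_SBI_EXT register class.
 * The MULTI_DIS view is the bitwise complement of the MULTI_EN bit-mask.
 */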
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_SBI_MULTI_EN:
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -ENOENT;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

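/* KVM_SET_ONE_REG handler for SBI extension state; only the STA subtype is handled here. */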
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
        default:
                return -EINVAL;
        }

        return 0;
}

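/* KVM_GET_ONE_REG handler for SBI extension state; only the STA subtype is handled here. */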
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;
        int ret;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
                break;
        default:
                return -EINVAL;
        }

        if (ret)
                return ret;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

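/*
 * Resolve the raw SBI extension ID (from a7) to its implementation.
 * Returns NULL when the ID is not implemented or the extension has been
 * disabled for this vCPU.
 */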
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                                return ext;

                        return NULL;
                }
        }

        return NULL;
}

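/*
 * Top-level SBI ecall dispatch: find the handler for the extension ID in
 * a7, run it, and then either continue the guest (advancing sepc past the
 * ecall), redirect a trap back to the guest, or exit to userspace. The
 * return value follows the usual KVM convention: > 0 continue the ioctl
 * loop, 0 exit to userspace, < 0 error.
 */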
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = {0};
        struct kvm_vcpu_sbi_return sbi_ret = {
                .out_val = 0,
                .err_val = 0,
                .utrap = &utrap,
        };
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &sbi_ret);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /*
         * When the SBI extension returns a Linux error code, it exits the ioctl
         * loop and forwards the error to userspace.
         */
        if (ret < 0) {
                next_sepc = false;
                goto ecall_done;
        }

        /* Handle special error cases, i.e. trap, exit or userspace forward */
        if (sbi_ret.utrap->scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                sbi_ret.utrap->sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (sbi_ret.uexit) {
                next_sepc = false;
                ret = 0;
        } else {
                cp->a0 = sbi_ret.err_val;
                ret = 1;
        }

ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        /* a1 should only be updated when we continue the ioctl loop */
        if (!ext_is_v01 && ret == 1)
                cp->a1 = sbi_ret.out_val;

        return ret;
}

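/*
 * Probe every registered extension for this vCPU and record its initial
 * status (unavailable, disabled by default, or enabled), so that later
 * lookups and enable/disable requests only consult ext_status[].
 */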
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->probe && !ext->probe(vcpu)) {
                        scontext->ext_status[entry->ext_idx] =
                                KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
                        continue;
                }

                scontext->ext_status[entry->ext_idx] = ext->default_disabled ?
                                        KVM_RISCV_SBI_EXT_STATUS_DISABLED :
                                        KVM_RISCV_SBI_EXT_STATUS_ENABLED;
        }
}