// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
		.ext_ptr = &vcpu_sbi_ext_dbcn,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}

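/*
 * For illustration, a minimal sketch of the userspace side of this exit
 * (assumed VMM code, not part of this file). The VMM emulates the call
 * and fills run->riscv_sbi.ret[]; kvm_riscv_vcpu_sbi_return() below
 * copies ret[0]/ret[1] back into guest a0/a1 on the next KVM_RUN:
 *
 *	if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
 *		// extension_id/function_id select the emulation;
 *		// emulate_sbi_call() is a hypothetical VMM helper.
 *		run->riscv_sbi.ret[0] = emulate_sbi_call(run);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *	}
 */
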
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

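/*
 * A minimal sketch, again assumed VMM-side code: userspace sees
 * KVM_EXIT_SYSTEM_EVENT with the SBI reset reason in data[0] and
 * decides whether to tear down or restart the VM:
 *
 *	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
 *		switch (run->system_event.type) {
 *		case KVM_SYSTEM_EVENT_SHUTDOWN:
 *			exit(0);
 *		case KVM_SYSTEM_EVENT_RESET:
 *			restart_vm();	// hypothetical VMM helper
 *			break;
 *		}
 *	}
 */
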
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction (an ecall is always 4 bytes) */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;
	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	scontext->ext_status[sext->ext_idx] = reg_val ?
			KVM_RISCV_SBI_EXT_AVAILABLE :
			KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	*reg_val = scontext->ext_status[sext->ext_idx] ==
				KVM_RISCV_SBI_EXT_AVAILABLE;

	return 0;
}

static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

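/*
 * Worked example of the encoding shared by riscv_vcpu_set_sbi_ext_multi()
 * above and riscv_vcpu_get_sbi_ext_multi() below, assuming RV64
 * (BITS_PER_LONG == 64): bit 5 of multi-register 0 controls ext_id 5
 * (0 * 64 + 5), while bit 2 of multi-register 1 would control ext_id 66
 * (1 * 64 + 2) if that many extensions were defined. A reg_val of
 * (1UL << 5) with enable == true therefore enables only extension 5.
 */
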
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	/* The SBI extension set can only change before the vcpu first runs */
	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

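/*
 * A minimal userspace sketch (assumed VMM code, not part of this file)
 * that disables SBI v0.1 emulation through the single-extension encoding
 * handled above, assuming a 64-bit host. It must run before the vcpu's
 * first KVM_RUN, or the ioctl fails with EBUSY:
 *
 *	uint64_t val = 0;	// 0 = disable, 1 = enable
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *		      KVM_RISCV_SBI_EXT_V01,
 *		.addr = (uint64_t)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */
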
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		/* The DIS view is the bitwise inverse of the EN view */
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			/* Entries with ext_idx >= MAX can't be disabled */
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_AVAILABLE)
				return ext;

			return NULL;
		}
	}

	return NULL;
}

int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the ioctl
	 * loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}

ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}

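/*
 * For reference, the handler contract consumed above, shown as a
 * hypothetical extension. The parameter types come from the handler call
 * in kvm_riscv_vcpu_sbi_ecall(); SBI_EXT_FOO and the handler body are
 * assumptions for illustration only:
 *
 *	static int kvm_sbi_ext_foo_handler(struct kvm_vcpu *vcpu,
 *					   struct kvm_run *run,
 *					   struct kvm_vcpu_sbi_return *retdata)
 *	{
 *		retdata->err_val = 0;	// propagated to guest a0
 *		retdata->out_val = 0;	// propagated to guest a1
 *		retdata->uexit = false;	// true: exit the ioctl loop
 *		return 0;		// < 0: forward Linux error to userspace
 *	}
 *
 *	static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_foo = {
 *		.extid_start = SBI_EXT_FOO,
 *		.extid_end = SBI_EXT_FOO,
 *		.handler = kvm_sbi_ext_foo_handler,
 *	};
 */
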
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		/*
		 * Mirror kvm_vcpu_sbi_find_ext(): entries such as the base
		 * extension use ext_idx == KVM_RISCV_SBI_EXT_MAX, which has
		 * no ext_status[] slot, so writing it would overrun the
		 * array. Such extensions are always available anyway.
		 */
		if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX)
			continue;

		if (ext->probe && !ext->probe(vcpu)) {
			scontext->ext_status[entry->ext_idx] =
					KVM_RISCV_SBI_EXT_UNAVAILABLE;
			continue;
		}

		scontext->ext_status[entry->ext_idx] = ext->default_unavail ?
					KVM_RISCV_SBI_EXT_UNAVAILABLE :
					KVM_RISCV_SBI_EXT_AVAILABLE;
	}
}