1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4 * Copyright (C) 2023 Ventana Micro Systems Inc.
7 * Anup Patel <apatel@ventanamicro.com>
10 #include <linux/bitops.h>
11 #include <linux/errno.h>
12 #include <linux/err.h>
13 #include <linux/uaccess.h>
14 #include <linux/kvm_host.h>
15 #include <asm/cacheflush.h>
16 #include <asm/cpufeature.h>
17 #include <asm/kvm_vcpu_vector.h>
18 #include <asm/vector.h>
20 #define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
22 #define KVM_ISA_EXT_ARR(ext) \
23 [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
25 /* Mapping between KVM ISA Extension ID & Host ISA extension ID */
/*
 * Index is the uapi KVM_RISCV_ISA_EXT_* ID, value is the host's
 * RISCV_ISA_EXT_* ID. Keep both sections alphabetically sorted so
 * new entries are easy to place and review.
 */
26 static const unsigned long kvm_isa_ext_arr[] = {
27 /* Single letter extensions (alphabetically sorted) */
28 [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
29 [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
30 [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
31 [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
32 [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
33 [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
34 [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
35 [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
36 /* Multi letter extensions (alphabetically sorted) */
37 KVM_ISA_EXT_ARR(SMSTATEEN),
38 KVM_ISA_EXT_ARR(SSAIA),
39 KVM_ISA_EXT_ARR(SSTC),
40 KVM_ISA_EXT_ARR(SVINVAL),
41 KVM_ISA_EXT_ARR(SVNAPOT),
42 KVM_ISA_EXT_ARR(SVPBMT),
46 KVM_ISA_EXT_ARR(ZBKB),
47 KVM_ISA_EXT_ARR(ZBKC),
48 KVM_ISA_EXT_ARR(ZBKX),
52 KVM_ISA_EXT_ARR(ZFHMIN),
53 KVM_ISA_EXT_ARR(ZICBOM),
54 KVM_ISA_EXT_ARR(ZICBOZ),
55 KVM_ISA_EXT_ARR(ZICNTR),
56 KVM_ISA_EXT_ARR(ZICOND),
57 KVM_ISA_EXT_ARR(ZICSR),
58 KVM_ISA_EXT_ARR(ZIFENCEI),
59 KVM_ISA_EXT_ARR(ZIHINTNTL),
60 KVM_ISA_EXT_ARR(ZIHINTPAUSE),
61 KVM_ISA_EXT_ARR(ZIHPM),
62 KVM_ISA_EXT_ARR(ZKND),
63 KVM_ISA_EXT_ARR(ZKNE),
64 KVM_ISA_EXT_ARR(ZKNH),
66 KVM_ISA_EXT_ARR(ZKSED),
67 KVM_ISA_EXT_ARR(ZKSH),
69 KVM_ISA_EXT_ARR(ZVBB),
70 KVM_ISA_EXT_ARR(ZVBC),
71 KVM_ISA_EXT_ARR(ZVFH),
72 KVM_ISA_EXT_ARR(ZVFHMIN),
73 KVM_ISA_EXT_ARR(ZVKB),
74 KVM_ISA_EXT_ARR(ZVKG),
75 KVM_ISA_EXT_ARR(ZVKNED),
76 KVM_ISA_EXT_ARR(ZVKNHA),
77 KVM_ISA_EXT_ARR(ZVKNHB),
78 KVM_ISA_EXT_ARR(ZVKSED),
79 KVM_ISA_EXT_ARR(ZVKSH),
80 KVM_ISA_EXT_ARR(ZVKT),
/*
 * Translate a host extension ID (RISCV_ISA_EXT_*) to the corresponding
 * KVM uapi ID (KVM_RISCV_ISA_EXT_*) by linear search of kvm_isa_ext_arr.
 * Returns KVM_RISCV_ISA_EXT_MAX when the host extension has no KVM mapping.
 */
83 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
87 	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
88 		if (kvm_isa_ext_arr[i] == base_ext)
92 	return KVM_RISCV_ISA_EXT_MAX;
/*
 * Can userspace enable this KVM ISA extension for the guest?
 * V is gated on the host's per-user vector state control; H appears to
 * fall through to a not-visible-here branch — NOTE(review): presumably
 * disallowed (no nested virt), confirm against the full file.
 */
95 static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
98 	case KVM_RISCV_ISA_EXT_H:
100 	case KVM_RISCV_ISA_EXT_V:
101 		return riscv_v_vstate_ctrl_user_allowed();
/*
 * Can userspace disable this KVM ISA extension for the guest?
 * Extensions in the first case list have no hardware mechanism to hide
 * them from a guest, so a disable request must be rejected. SSAIA can be
 * hidden only when the host supports Smstateen.
 */
109 static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
112 	/* Extensions which don't have any mechanism to disable */
113 	case KVM_RISCV_ISA_EXT_A:
114 	case KVM_RISCV_ISA_EXT_C:
115 	case KVM_RISCV_ISA_EXT_I:
116 	case KVM_RISCV_ISA_EXT_M:
117 	case KVM_RISCV_ISA_EXT_SSTC:
118 	case KVM_RISCV_ISA_EXT_SVINVAL:
119 	case KVM_RISCV_ISA_EXT_SVNAPOT:
120 	case KVM_RISCV_ISA_EXT_ZBA:
121 	case KVM_RISCV_ISA_EXT_ZBB:
122 	case KVM_RISCV_ISA_EXT_ZBC:
123 	case KVM_RISCV_ISA_EXT_ZBKB:
124 	case KVM_RISCV_ISA_EXT_ZBKC:
125 	case KVM_RISCV_ISA_EXT_ZBKX:
126 	case KVM_RISCV_ISA_EXT_ZBS:
127 	case KVM_RISCV_ISA_EXT_ZFA:
128 	case KVM_RISCV_ISA_EXT_ZFH:
129 	case KVM_RISCV_ISA_EXT_ZFHMIN:
130 	case KVM_RISCV_ISA_EXT_ZICNTR:
131 	case KVM_RISCV_ISA_EXT_ZICOND:
132 	case KVM_RISCV_ISA_EXT_ZICSR:
133 	case KVM_RISCV_ISA_EXT_ZIFENCEI:
134 	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
135 	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
136 	case KVM_RISCV_ISA_EXT_ZIHPM:
137 	case KVM_RISCV_ISA_EXT_ZKND:
138 	case KVM_RISCV_ISA_EXT_ZKNE:
139 	case KVM_RISCV_ISA_EXT_ZKNH:
140 	case KVM_RISCV_ISA_EXT_ZKR:
141 	case KVM_RISCV_ISA_EXT_ZKSED:
142 	case KVM_RISCV_ISA_EXT_ZKSH:
143 	case KVM_RISCV_ISA_EXT_ZKT:
144 	case KVM_RISCV_ISA_EXT_ZVBB:
145 	case KVM_RISCV_ISA_EXT_ZVBC:
146 	case KVM_RISCV_ISA_EXT_ZVFH:
147 	case KVM_RISCV_ISA_EXT_ZVFHMIN:
148 	case KVM_RISCV_ISA_EXT_ZVKB:
149 	case KVM_RISCV_ISA_EXT_ZVKG:
150 	case KVM_RISCV_ISA_EXT_ZVKNED:
151 	case KVM_RISCV_ISA_EXT_ZVKNHA:
152 	case KVM_RISCV_ISA_EXT_ZVKNHB:
153 	case KVM_RISCV_ISA_EXT_ZVKSED:
154 	case KVM_RISCV_ISA_EXT_ZVKSH:
155 	case KVM_RISCV_ISA_EXT_ZVKT:
157 	/* Extensions which can be disabled using Smstateen */
158 	case KVM_RISCV_ISA_EXT_SSAIA:
159 		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
/*
 * Initialize the vcpu's guest ISA bitmap: every extension that the host
 * provides AND that KVM allows to be enabled is turned on by default.
 * Note set_bit() uses the *host* extension ID as the bit index while the
 * enable check takes the *KVM* ID (the loop index i).
 */
167 void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
169 	unsigned long host_isa, i;
171 	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
172 		host_isa = kvm_isa_ext_arr[i];
173 		if (__riscv_isa_extension_available(NULL, host_isa) &&
174 		    kvm_riscv_vcpu_isa_enable_allowed(i))
175 			set_bit(host_isa, vcpu->arch.isa);
/*
 * KVM_GET_ONE_REG handler for the CONFIG register class: reports the base
 * ISA bitmap, CBO block sizes (only when the matching Zicbom/Zicboz
 * extension is enabled for this vcpu), machine ID CS'Rs and the SATP mode,
 * then copies the value out to userspace.
 */
179 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
180 					 const struct kvm_one_reg *reg)
182 	unsigned long __user *uaddr =
183 			(unsigned long __user *)(unsigned long)reg->addr;
184 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
186 					    KVM_REG_RISCV_CONFIG);
187 	unsigned long reg_val;
189 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
193 	case KVM_REG_RISCV_CONFIG_REG(isa):
		/* Only the single-letter (base) ISA bits are exposed here. */
194 		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
196 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
197 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
199 		reg_val = riscv_cbom_block_size;
201 	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
202 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
204 		reg_val = riscv_cboz_block_size;
206 	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
207 		reg_val = vcpu->arch.mvendorid;
209 	case KVM_REG_RISCV_CONFIG_REG(marchid):
210 		reg_val = vcpu->arch.marchid;
212 	case KVM_REG_RISCV_CONFIG_REG(mimpid):
213 		reg_val = vcpu->arch.mimpid;
215 	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
216 		reg_val = satp_mode >> SATP_MODE_SHIFT;
	/* NOTE(review): "®_val" below is mojibake for "&reg_val" — repair when restoring the file. */
222 	if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
/*
 * KVM_SET_ONE_REG handler for the CONFIG register class.
 * The 'isa' bitmap and the machine ID registers (mvendorid/marchid/mimpid)
 * are only writable before the vcpu has run; block-size and satp_mode
 * registers are read-only and merely accept a matching value.
 */
228 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
229 					 const struct kvm_one_reg *reg)
231 	unsigned long __user *uaddr =
232 			(unsigned long __user *)(unsigned long)reg->addr;
233 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
235 					    KVM_REG_RISCV_CONFIG);
236 	unsigned long i, isa_ext, reg_val;
238 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
	/* NOTE(review): "®_val" here is mojibake for "&reg_val". */
241 	if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
245 	case KVM_REG_RISCV_CONFIG_REG(isa):
247 		 * This ONE REG interface is only defined for
248 		 * single letter extensions.
250 		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
254 		 * Return early (i.e. do nothing) if reg_val is the same
255 		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
257 		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
260 		if (!vcpu->arch.ran_atleast_once) {
261 			/* Ignore the enable/disable request for certain extensions */
262 			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
263 				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
264 				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
				/* Force-keep current state for bits KVM refuses to flip. */
268 				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
269 					if (reg_val & BIT(i))
271 				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
272 					if (!(reg_val & BIT(i)))
			/* Never advertise more than the host actually has. */
275 			reg_val &= riscv_isa_extension_base(NULL);
276 			/* Do not modify anything beyond single letter extensions */
277 			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
278 				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
279 			vcpu->arch.isa[0] = reg_val;
			/* FP state layout may change with F/D, so reset it. */
280 			kvm_riscv_vcpu_fp_reset(vcpu);
285 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
286 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
288 		if (reg_val != riscv_cbom_block_size)
291 	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
292 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
294 		if (reg_val != riscv_cboz_block_size)
297 	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
298 		if (reg_val == vcpu->arch.mvendorid)
300 		if (!vcpu->arch.ran_atleast_once)
301 			vcpu->arch.mvendorid = reg_val;
305 	case KVM_REG_RISCV_CONFIG_REG(marchid):
306 		if (reg_val == vcpu->arch.marchid)
308 		if (!vcpu->arch.ran_atleast_once)
309 			vcpu->arch.marchid = reg_val;
313 	case KVM_REG_RISCV_CONFIG_REG(mimpid):
314 		if (reg_val == vcpu->arch.mimpid)
316 		if (!vcpu->arch.ran_atleast_once)
317 			vcpu->arch.mimpid = reg_val;
321 	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
322 		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
/*
 * KVM_GET_ONE_REG handler for CORE registers (pc, x1-x31, privilege mode).
 * GPRs are read by indexing kvm_cpu_context as a flat unsigned long array,
 * which relies on struct layout matching struct kvm_riscv_core.
 */
332 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
333 				       const struct kvm_one_reg *reg)
335 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
336 	unsigned long __user *uaddr =
337 			(unsigned long __user *)(unsigned long)reg->addr;
338 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
341 	unsigned long reg_val;
343 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
345 	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
348 	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
349 		reg_val = cntx->sepc;
350 	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
351 		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
352 		reg_val = ((unsigned long *)cntx)[reg_num];
353 	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		/* Guest privilege mode is derived from the SPP bit of sstatus. */
354 		reg_val = (cntx->sstatus & SR_SPP) ?
355 			KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	/* NOTE(review): "®_val" below is mojibake for "&reg_val". */
359 	if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
/*
 * KVM_SET_ONE_REG handler for CORE registers — mirror of the getter:
 * writes pc/GPRs into guest_context and encodes the requested privilege
 * mode into the sstatus.SPP bit.
 */
365 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
366 				       const struct kvm_one_reg *reg)
368 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
369 	unsigned long __user *uaddr =
370 			(unsigned long __user *)(unsigned long)reg->addr;
371 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
374 	unsigned long reg_val;
376 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
378 	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
	/* NOTE(review): "®_val" here is mojibake for "&reg_val". */
381 	if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
384 	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
385 		cntx->sepc = reg_val;
386 	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
387 		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
388 		((unsigned long *)cntx)[reg_num] = reg_val;
389 	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
390 		if (reg_val == KVM_RISCV_MODE_S)
391 			cntx->sstatus |= SR_SPP;
393 			cntx->sstatus &= ~SR_SPP;
/*
 * Read one general guest CSR by index into struct kvm_vcpu_csr.
 * 'sip' is special: pending interrupts are flushed first and the value is
 * synthesized from hvip (shifted VS bits plus the non-local-IRQ bits).
 */
400 static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
401 					  unsigned long reg_num,
402 					  unsigned long *out_val)
404 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
406 	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
409 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
410 		kvm_riscv_vcpu_flush_interrupts(vcpu);
411 		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
412 		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
414 		*out_val = ((unsigned long *)csr)[reg_num];
/*
 * Write one general guest CSR by index. A write to 'sip' is translated
 * into hvip layout (mask to valid VS bits, shift up) and clears any
 * pending-interrupt mask so stale requests don't override the new value.
 */
419 static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
420 					  unsigned long reg_num,
421 					  unsigned long reg_val)
423 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
425 	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
428 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
429 		reg_val &= VSIP_VALID_MASK;
430 		reg_val <<= VSIP_TO_HVIP_SHIFT;
433 	((unsigned long *)csr)[reg_num] = reg_val;
435 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
436 		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
/* Write one Smstateen CSR by flat index into struct kvm_vcpu_smstateen_csr. */
441 static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
442 						   unsigned long reg_num,
443 						   unsigned long reg_val)
445 	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
447 	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
448 		       sizeof(unsigned long))
451 	((unsigned long *)csr)[reg_num] = reg_val;
/* Read one Smstateen CSR by flat index into struct kvm_vcpu_smstateen_csr. */
455 static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
456 					    unsigned long reg_num,
457 					    unsigned long *out_val)
459 	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
461 	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
462 		       sizeof(unsigned long))
465 	*out_val = ((unsigned long *)csr)[reg_num];
/*
 * KVM_GET_ONE_REG handler for the CSR class: decode the subtype
 * (GENERAL / AIA / SMSTATEEN) and dispatch to the matching reader, then
 * copy the result to userspace. SMSTATEEN reads additionally require the
 * host to implement Smstateen.
 */
469 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
470 				      const struct kvm_one_reg *reg)
473 	unsigned long __user *uaddr =
474 			(unsigned long __user *)(unsigned long)reg->addr;
475 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
478 	unsigned long reg_val, reg_subtype;
480 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
483 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
484 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
485 	switch (reg_subtype) {
486 	case KVM_REG_RISCV_CSR_GENERAL:
		/* NOTE(review): "®_val" in this function is mojibake for "&reg_val". */
487 		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, ®_val);
489 	case KVM_REG_RISCV_CSR_AIA:
490 		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val);
492 	case KVM_REG_RISCV_CSR_SMSTATEEN:
494 		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
495 			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
505 	if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
/*
 * KVM_SET_ONE_REG handler for the CSR class — mirror of the getter:
 * read the value from userspace, then dispatch on subtype
 * (GENERAL / AIA / SMSTATEEN) to the matching writer.
 */
511 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
512 				      const struct kvm_one_reg *reg)
515 	unsigned long __user *uaddr =
516 			(unsigned long __user *)(unsigned long)reg->addr;
517 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
520 	unsigned long reg_val, reg_subtype;
522 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
	/* NOTE(review): "®_val" here is mojibake for "&reg_val". */
525 	if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
528 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
529 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
530 	switch (reg_subtype) {
531 	case KVM_REG_RISCV_CSR_GENERAL:
532 		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
534 	case KVM_REG_RISCV_CSR_AIA:
535 		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
537 	case KVM_REG_RISCV_CSR_SMSTATEEN:
539 		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
540 			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
/*
 * Report whether a single KVM ISA extension is enabled for this vcpu.
 * Fails for out-of-range IDs or extensions the host itself lacks;
 * otherwise *reg_val is 1 when the extension is set in the vcpu's bitmap.
 */
553 static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
554 					 unsigned long reg_num,
555 					 unsigned long *reg_val)
557 	unsigned long host_isa_ext;
559 	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
560 	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
563 	host_isa_ext = kvm_isa_ext_arr[reg_num];
564 	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
568 	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
569 		*reg_val = 1; /* Mark the given extension as available */
/*
 * Enable/disable a single KVM ISA extension for this vcpu.
 * No-op when the requested state already matches; state changes are only
 * honored before the vcpu first runs and only when allowed by the
 * enable/disable policy helpers. FP state is reset afterwards since the
 * usable register set may have changed.
 */
574 static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
575 					 unsigned long reg_num,
576 					 unsigned long reg_val)
578 	unsigned long host_isa_ext;
580 	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
581 	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
584 	host_isa_ext = kvm_isa_ext_arr[reg_num];
585 	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
588 	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
591 	if (!vcpu->arch.ran_atleast_once) {
593 		 * All multi-letter extension and a few single letter
594 		 * extension can be disabled
597 		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
598 			set_bit(host_isa_ext, vcpu->arch.isa);
600 			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
601 			clear_bit(host_isa_ext, vcpu->arch.isa);
604 		kvm_riscv_vcpu_fp_reset(vcpu);
/*
 * Build one word of the "multi" ISA bitmap: bit i of *reg_val reflects the
 * single-extension state of ID (reg_num * BITS_PER_LONG + i), stopping at
 * KVM_RISCV_ISA_EXT_MAX.
 */
612 static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
613 					unsigned long reg_num,
614 					unsigned long *reg_val)
616 	unsigned long i, ext_id, ext_val;
618 	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
621 	for (i = 0; i < BITS_PER_LONG; i++) {
622 		ext_id = i + reg_num * BITS_PER_LONG;
623 		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
627 		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
629 			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
/*
 * Apply one word of a "multi" ISA bitmap: for every set bit in reg_val,
 * enable (enable=true) or disable (enable=false) the corresponding
 * single extension. Individual per-extension failures are not propagated.
 */
635 static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
636 					unsigned long reg_num,
637 					unsigned long reg_val, bool enable)
639 	unsigned long i, ext_id;
641 	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
	/* NOTE(review): "®_val" below is mojibake for "&reg_val". */
644 	for_each_set_bit(i, ®_val, BITS_PER_LONG) {
645 		ext_id = i + reg_num * BITS_PER_LONG;
646 		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
649 		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
/*
 * KVM_GET_ONE_REG handler for the ISA_EXT class: dispatch on subtype —
 * SINGLE returns one extension's state, MULTI_EN/MULTI_DIS return a word
 * of the bitmap (MULTI_DIS is the complement, inverted after the read).
 */
655 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
656 					  const struct kvm_one_reg *reg)
659 	unsigned long __user *uaddr =
660 			(unsigned long __user *)(unsigned long)reg->addr;
661 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
663 					    KVM_REG_RISCV_ISA_EXT);
664 	unsigned long reg_val, reg_subtype;
666 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
669 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
670 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
673 	switch (reg_subtype) {
674 	case KVM_REG_RISCV_ISA_SINGLE:
		/* NOTE(review): "®_val" in this function is mojibake for "&reg_val". */
675 		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, ®_val);
677 	case KVM_REG_RISCV_ISA_MULTI_EN:
678 	case KVM_REG_RISCV_ISA_MULTI_DIS:
679 		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, ®_val);
680 		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
689 	if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
695 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
696 const struct kvm_one_reg *reg)
698 unsigned long __user *uaddr =
699 (unsigned long __user *)(unsigned long)reg->addr;
700 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
702 KVM_REG_RISCV_ISA_EXT);
703 unsigned long reg_val, reg_subtype;
705 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
708 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
709 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
711 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
714 switch (reg_subtype) {
715 case KVM_REG_RISCV_ISA_SINGLE:
716 return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
717 case KVM_REG_RISCV_SBI_MULTI_EN:
718 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
719 case KVM_REG_RISCV_SBI_MULTI_DIS:
720 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
/*
 * Emit (or, when uindices is NULL, merely count) the register IDs of the
 * CONFIG class. Block-size registers are skipped when the corresponding
 * Zicbom/Zicboz extension is not enabled for this vcpu. Returns the number
 * of indices produced, or -EFAULT on a failed put_user().
 */
728 static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
729 				   u64 __user *uindices)
733 	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
739 		 * Avoid reporting config reg if the corresponding extension
742 		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
743 		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
745 		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
746 			 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
749 		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
750 		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
753 			if (put_user(reg, uindices))
/* Count CONFIG registers by running the copy helper with a NULL buffer. */
764 static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
766 	return copy_config_reg_indices(vcpu, NULL);
/* Number of CORE registers: fixed by the uapi struct kvm_riscv_core layout. */
769 static inline unsigned long num_core_regs(void)
771 	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
/* Emit the register IDs of the CORE class; returns the count or -EFAULT. */
774 static int copy_core_reg_indices(u64 __user *uindices)
776 	int n = num_core_regs();
778 	for (int i = 0; i < n; i++) {
779 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
780 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
781 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
784 			if (put_user(reg, uindices))
/*
 * Number of CSR registers: the general set, plus the AIA set when SSAIA is
 * enabled for this vcpu, plus the Smstateen set when SMSTATEEN is enabled.
 */
793 static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
795 	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
797 	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
798 		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
799 	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
800 		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
/*
 * Emit the register IDs of the CSR class: general CSRs always, AIA CSRs
 * when SSAIA is enabled, Smstateen CSRs when SMSTATEEN is enabled.
 * Must stay consistent with num_csr_regs(). Returns count or -EFAULT.
 */
805 static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
806 				u64 __user *uindices)
808 	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
811 	/* copy general csr regs */
812 	for (int i = 0; i < n1; i++) {
813 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
814 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
815 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
816 			  KVM_REG_RISCV_CSR_GENERAL | i;
819 			if (put_user(reg, uindices))
825 	/* copy AIA csr regs */
826 	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
827 		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
829 		for (int i = 0; i < n2; i++) {
830 			u64 size = IS_ENABLED(CONFIG_32BIT) ?
831 				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
832 			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
833 				  KVM_REG_RISCV_CSR_AIA | i;
836 				if (put_user(reg, uindices))
843 	/* copy Smstateen csr regs */
844 	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
845 		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
847 		for (int i = 0; i < n3; i++) {
848 			u64 size = IS_ENABLED(CONFIG_32BIT) ?
849 				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
850 			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
851 				  KVM_REG_RISCV_CSR_SMSTATEEN | i;
854 				if (put_user(reg, uindices))
/* Number of TIMER registers: fixed by the uapi struct kvm_riscv_timer (u64 fields). */
864 static inline unsigned long num_timer_regs(void)
866 	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
/* Emit the register IDs of the TIMER class (always U64); returns count or -EFAULT. */
869 static int copy_timer_reg_indices(u64 __user *uindices)
871 	int n = num_timer_regs();
873 	for (int i = 0; i < n; i++) {
874 		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
875 			  KVM_REG_RISCV_TIMER | i;
878 			if (put_user(reg, uindices))
/*
 * Number of single-precision FP registers exposed for this vcpu:
 * the f-state words when the F extension is enabled (else an elided
 * fallthrough — presumably 0).
 */
887 static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
889 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
891 	if (riscv_isa_extension_available(vcpu->arch.isa, f))
892 		return sizeof(cntx->fp.f) / sizeof(u32);
/* Emit the register IDs of the FP_F class (U32 each); returns count or -EFAULT. */
897 static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
898 				 u64 __user *uindices)
900 	int n = num_fp_f_regs(vcpu);
902 	for (int i = 0; i < n; i++) {
903 		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
904 			  KVM_REG_RISCV_FP_F | i;
907 			if (put_user(reg, uindices))
/*
 * Number of double-precision FP registers for this vcpu when D is enabled:
 * the d.f words plus one extra slot (the U32 fcsr register — see
 * copy_fp_d_reg_indices()).
 */
916 static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
918 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
920 	if (riscv_isa_extension_available(vcpu->arch.isa, d))
921 		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
/*
 * Emit the register IDs of the FP_D class: n-1 U64 data registers followed
 * by the U32 fcsr at index n-1. Returns count or -EFAULT.
 */
926 static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
927 				 u64 __user *uindices)
930 	int n = num_fp_d_regs(vcpu);
933 	/* copy fp.d.f indices */
934 	for (i = 0; i < n-1; i++) {
935 		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
936 		      KVM_REG_RISCV_FP_D | i;
939 			if (put_user(reg, uindices))
945 	/* copy fp.d.fcsr indices */
946 	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
948 		if (put_user(reg, uindices))
/*
 * Emit the register IDs of the ISA_EXT class, skipping extensions the host
 * does not implement. Returns count or -EFAULT.
 */
956 static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
957 				    u64 __user *uindices)
960 	unsigned long isa_ext;
962 	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
963 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
964 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
965 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
967 		isa_ext = kvm_isa_ext_arr[i];
968 		if (!__riscv_isa_extension_available(NULL, isa_ext))
972 			if (put_user(reg, uindices))
983 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
985 return copy_isa_ext_reg_indices(vcpu, NULL);;
/*
 * Emit the register IDs of the SBI_EXT (single) class, skipping SBI
 * extensions this vcpu does not support. Returns count or -EFAULT.
 */
988 static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
992 	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
993 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
994 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
995 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
996 			  KVM_REG_RISCV_SBI_SINGLE | i;
998 		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
1002 			if (put_user(reg, uindices))
/* Count SBI_EXT registers by running the copy helper with a NULL buffer. */
1013 static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
1015 	return copy_sbi_ext_reg_indices(vcpu, NULL);
/*
 * Emit the register IDs of the SBI_STATE class. Currently only the STA
 * (steal-time accounting) registers exist, and only when that SBI
 * extension is enabled for this vcpu. Returns count or -EFAULT.
 */
1018 static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1020 	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
1023 	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
1024 		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
1025 		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
1027 		for (int i = 0; i < n; i++) {
1028 			u64 reg = KVM_REG_RISCV | size |
1029 				  KVM_REG_RISCV_SBI_STATE |
1030 				  KVM_REG_RISCV_SBI_STA | i;
1033 				if (put_user(reg, uindices))
/* Count SBI_STATE registers by running the copy helper with a NULL buffer. */
1045 static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
1047 	return copy_sbi_reg_indices(vcpu, NULL);
/*
 * Number of VECTOR registers for this vcpu: 0 without the V extension,
 * otherwise the 5 control values plus the 32 vector registers (return of
 * the constant is in an elided line below the comment).
 */
1050 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
1052 	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
1055 	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
/*
 * Emit the register IDs of the VECTOR class: the 5 fixed control registers
 * first, then v0-v31. The data registers' encoded size is derived from the
 * runtime vlenb (log2 of the byte length shifted into the size field), so
 * it varies between machines. Returns count or -EFAULT.
 */
1059 static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
1060 				   u64 __user *uindices)
1062 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
1063 	int n = num_vector_regs(vcpu);
1070 	/* copy vstart, vl, vtype, vcsr and vlenb */
1071 	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
1072 	for (i = 0; i < 5; i++) {
1073 		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;
1076 			if (put_user(reg, uindices))
1082 	/* vector_regs have a variable 'vlenb' size */
1083 	size = __builtin_ctzl(cntx->vector.vlenb);
1084 	size <<= KVM_REG_SIZE_SHIFT;
1085 	for (i = 0; i < 32; i++) {
1086 		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
1087 		      KVM_REG_RISCV_VECTOR_REG(i);
1090 			if (put_user(reg, uindices))
1100 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
1102 * This is for all registers.
/*
 * Sums the per-class counters; each counter must stay in lock-step with its
 * corresponding copy_*_reg_indices() helper or KVM_GET_REG_LIST will report
 * a wrong total.
 */
1104 unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
1106 	unsigned long res = 0;
1108 	res += num_config_regs(vcpu);
1109 	res += num_core_regs();
1110 	res += num_csr_regs(vcpu);
1111 	res += num_timer_regs();
1112 	res += num_fp_f_regs(vcpu);
1113 	res += num_fp_d_regs(vcpu);
1114 	res += num_vector_regs(vcpu);
1115 	res += num_isa_ext_regs(vcpu);
1116 	res += num_sbi_ext_regs(vcpu);
1117 	res += num_sbi_regs(vcpu);
1123 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
/*
 * Writes every register ID to the userspace array, one class at a time,
 * in the same order the num_* helpers are summed. Error handling between
 * calls is elided here; each helper returns its count or a negative errno.
 */
1125 int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
1126 				    u64 __user *uindices)
1130 	ret = copy_config_reg_indices(vcpu, uindices);
1135 	ret = copy_core_reg_indices(uindices);
1140 	ret = copy_csr_reg_indices(vcpu, uindices);
1145 	ret = copy_timer_reg_indices(uindices);
1150 	ret = copy_fp_f_reg_indices(vcpu, uindices);
1155 	ret = copy_fp_d_reg_indices(vcpu, uindices);
1160 	ret = copy_vector_reg_indices(vcpu, uindices);
1165 	ret = copy_isa_ext_reg_indices(vcpu, uindices);
1170 	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
1175 	ret = copy_sbi_reg_indices(vcpu, uindices);
/*
 * Top-level KVM_SET_ONE_REG dispatcher: route to the per-class setter
 * based on the register type field of reg->id.
 */
1183 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
1184 			   const struct kvm_one_reg *reg)
1186 	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1187 	case KVM_REG_RISCV_CONFIG:
1188 		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
1189 	case KVM_REG_RISCV_CORE:
1190 		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
1191 	case KVM_REG_RISCV_CSR:
1192 		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
1193 	case KVM_REG_RISCV_TIMER:
1194 		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
1195 	case KVM_REG_RISCV_FP_F:
1196 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1197 						 KVM_REG_RISCV_FP_F);
1198 	case KVM_REG_RISCV_FP_D:
1199 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1200 						 KVM_REG_RISCV_FP_D);
1201 	case KVM_REG_RISCV_VECTOR:
1202 		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
1203 	case KVM_REG_RISCV_ISA_EXT:
1204 		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
1205 	case KVM_REG_RISCV_SBI_EXT:
1206 		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
1207 	case KVM_REG_RISCV_SBI_STATE:
1208 		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
/*
 * Top-level KVM_GET_ONE_REG dispatcher — mirror of kvm_riscv_vcpu_set_reg():
 * route to the per-class getter based on the register type field of reg->id.
 */
1216 int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
1217 			   const struct kvm_one_reg *reg)
1219 	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1220 	case KVM_REG_RISCV_CONFIG:
1221 		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
1222 	case KVM_REG_RISCV_CORE:
1223 		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
1224 	case KVM_REG_RISCV_CSR:
1225 		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
1226 	case KVM_REG_RISCV_TIMER:
1227 		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
1228 	case KVM_REG_RISCV_FP_F:
1229 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1230 						 KVM_REG_RISCV_FP_F);
1231 	case KVM_REG_RISCV_FP_D:
1232 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1233 						 KVM_REG_RISCV_FP_D);
1234 	case KVM_REG_RISCV_VECTOR:
1235 		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
1236 	case KVM_REG_RISCV_ISA_EXT:
1237 		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
1238 	case KVM_REG_RISCV_SBI_EXT:
1239 		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
1240 	case KVM_REG_RISCV_SBI_STATE:
1241 		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);