// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

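/*
 * SBI TIME extension: the only function, SET_TIMER, programs the next
 * timer event for the calling VCPU. On RV32 the 64-bit deadline is
 * passed split across a0 (low half) and a1 (high half); on RV64 it
 * fits entirely in a0.
 */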
static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	u64 next_cycle;

	if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
		return -EINVAL;

#if __riscv_xlen == 32
	next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
	next_cycle = (u64)cp->a0;
#endif
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);

	return ret;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};

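/*
 * SBI IPI extension: SEND_IPI injects a VS-mode software interrupt into
 * each targeted VCPU. Targets are encoded as a base hart id in a1 plus
 * a bitmask in a0 relative to that base; a base of -1UL skips the
 * filter and selects every VCPU of the VM.
 */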
static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   unsigned long *out_val,
				   struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	unsigned long i;
	struct kvm_vcpu *tmp;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;

	if (cp->a6 != SBI_EXT_IPI_SEND_IPI)
		return -EINVAL;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (hbase != -1UL) {
			if (tmp->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
				continue;
		}
		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
		if (ret < 0)
			break;
	}

	return ret;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
	.extid_start = SBI_EXT_IPI,
	.extid_end = SBI_EXT_IPI,
	.handler = kvm_sbi_ext_ipi_handler,
};

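/*
 * SBI RFENCE extension: remote fences requested by the guest are mapped
 * onto the host's guest-TLB maintenance helpers. A zero start/size pair
 * in a2/a3 is treated as a full-address-range flush. The HFENCE calls
 * are deliberate NOPs until nested virtualization is supported.
 */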
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      unsigned long *out_val,
				      struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
		else
			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
						  cp->a2, cp->a3, PAGE_SHIFT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
						       hbase, hmask, cp->a4);
		else
			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
						       hbase, hmask, cp->a2,
						       cp->a3, PAGE_SHIFT,
						       cp->a4);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		/*
		 * Until nested virtualization is implemented, the
		 * SBI HFENCE calls should be treated as NOPs
		 */
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_start = SBI_EXT_RFENCE,
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
};

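/*
 * SBI SRST extension: shutdown and (cold/warm) reboot requests are
 * forwarded to userspace as KVM system events; *exit is set so the
 * VCPU run loop returns to the VMM instead of resuming the guest.
 */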
static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	u32 reason = cp->a1;
	u32 type = cp->a0;
	int ret = 0;

	switch (funcid) {
	case SBI_EXT_SRST_RESET:
		switch (type) {
		case SBI_SRST_RESET_TYPE_SHUTDOWN:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_SHUTDOWN,
						reason);
			*exit = true;
			break;
		case SBI_SRST_RESET_TYPE_COLD_REBOOT:
		case SBI_SRST_RESET_TYPE_WARM_REBOOT:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_RESET,
						reason);
			*exit = true;
			break;
		default:
			ret = -EOPNOTSUPP;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
	.extid_start = SBI_EXT_SRST,
	.extid_end = SBI_EXT_SRST,
	.handler = kvm_sbi_ext_srst_handler,
};
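
/*
 * Registration sketch (illustration only, not verified against this
 * tree; the table name and its exact contents are assumptions): each
 * descriptor above is expected to appear in the SBI extension table in
 * vcpu_sbi.c, so the ECALL dispatch path can match the guest's
 * extension ID in a7 against [extid_start, extid_end] and invoke
 * ->handler. Something along these lines:
 */
#if 0	/* hypothetical example, kept out of the build on purpose */
static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
	&vcpu_sbi_ext_time,
	&vcpu_sbi_ext_ipi,
	&vcpu_sbi_ext_rfence,
	&vcpu_sbi_ext_srst,
};
#endif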