// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

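/*
 * Local TLB maintenance helpers: each kvm_riscv_local_*() function below
 * issues HFENCE.GVMA/HFENCE.VVMA (or, when the Svinval extension is
 * available, the HINVAL variants bracketed by SFENCE.W.INVAL and
 * SFENCE.INVAL.IR) on the current host CPU only.
 */
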
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}

void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}

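/*
 * The VVMA helpers operate on the VS-stage address space of a VMID, so
 * they temporarily program that VMID into CSR_HGATP, issue the fences,
 * and then restore the previous CSR_HGATP value.
 */
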
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid, unsigned long asid,
					  unsigned long gva, unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");
	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");
	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the same
	 * VMID for all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest which ran previously on the
	 * current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

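/*
 * The *_process() handlers below run on a target VCPU when the
 * corresponding KVM request (raised by make_xfence_request()) is
 * serviced before re-entering the guest.
 */
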
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
	local_flush_icache_all();
}

void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}

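/*
 * Each VCPU carries a small ring buffer of pending hfence requests
 * (hfence_queue with hfence_head/hfence_tail indices, protected by
 * hfence_lock). A slot whose type is zero is free; enqueue fails when
 * the tail slot is still occupied.
 */
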
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

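/*
 * Drain the VCPU's hfence queue and apply each pending request with the
 * appropriate local fence, using the VM's current VMID.
 */
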
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			kvm_riscv_local_hfence_gvma_vmid_gpa(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			kvm_riscv_local_hfence_vvma_asid_gva(
						READ_ONCE(v->vmid), d.asid,
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			kvm_riscv_local_hfence_vvma_asid_all(
						READ_ONCE(v->vmid), d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
			kvm_riscv_local_hfence_vvma_gva(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		default:
			break;
		}
	}
}

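/*
 * make_xfence_request() builds a mask of target VCPUs from the hart
 * base/mask pair (hbase == -1UL selects all VCPUs), enqueues any
 * range-specific hfence data on each target, and then raises the KVM
 * request so the targets act on it before re-entering the guest.
 */
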
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fallback to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

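/*
 * The kvm_riscv_*() wrappers below are the entry points used by the rest
 * of KVM/RISC-V (for example the SBI remote fence handling and the
 * G-stage MMU code) to request TLB maintenance on a set of VCPUs.
 */
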
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}