1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
10 #include <linux/atomic.h>
11 #include <linux/bitmap.h>
12 #include <linux/kvm_host.h>
13 #include <linux/math.h>
14 #include <linux/spinlock.h>
15 #include <linux/swab.h>
16 #include <kvm/iodev.h>
18 #include <asm/kvm_aia_imsic.h>
/* Number of 64-bit EIx (pending/enable) register pairs covering IMSIC_MAX_ID. */
20 #define IMSIC_MAX_EIX	(IMSIC_MAX_ID / BITS_PER_TYPE(u64))
/*
 * One external-interrupt index pair: eip holds pending bits, eie holds
 * enable bits; each 64-bit register file is packed into host unsigned longs.
 */
22 struct imsic_mrif_eix {
23 	unsigned long eip[BITS_PER_TYPE(u64) / BITS_PER_LONG];
24 	unsigned long eie[BITS_PER_TYPE(u64) / BITS_PER_LONG];
/*
 * NOTE(review): the closing brace of imsic_mrif_eix and the opening
 * "struct imsic_mrif {" appear elided in this extraction — confirm
 * against the original file.
 */
28 	struct imsic_mrif_eix eix[IMSIC_MAX_EIX];
29 	unsigned long eithreshold;
30 	unsigned long eidelivery;
/*
 * NOTE(review): interior of "struct imsic" — the struct opener and several
 * members referenced later in this file (nr_msis, nr_eix, nr_hw_eix,
 * vsfile_lock, vsfile_hgei, vsfile_cpu) appear elided here; verify against
 * the original file.
 */
	/* MMIO device registered on the KVM_MMIO_BUS for the guest IMSIC page */
34 	struct kvm_io_device iodev;
41 	 * At any point in time, the register state is in
42 	 * one of the following places:
44 	 * 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
45 	 * 2) Software: IMSIC SW-file (vsfile_cpu < 0)
	/* Mapping of the hardware VS-file backing this vCPU (when resident) */
52 	void __iomem *vsfile_va;
53 	phys_addr_t vsfile_pa;
	/* Software copy of the register state used when no VS-file is held */
56 	struct imsic_mrif *swfile;
57 	phys_addr_t swfile_pa;
	/* Serializes topei read vs. pending-bit update in extirq evaluation */
58 	spinlock_t swfile_extirq_lock;
/*
 * Indirect VS-level CSR read: select the register via CSR_VSISELECT, then
 * read it back through the CSR_VSIREG window.
 * NOTE(review): the surrounding ({ ... }) statement-expression braces and
 * the switch/case scaffolding appear elided in this extraction.
 */
61 #define imsic_vs_csr_read(__c)			\
64 	csr_write(CSR_VSISELECT, __c);		\
65 	__r = csr_read(CSR_VSIREG);		\
/*
 * Macro ladder that expands to 64 consecutive case labels, one per EIx
 * register, doubling the covered range at each level (2, 4, 8, ... 64).
 */
69 #define imsic_read_switchcase(__ireg)			\
71 		return imsic_vs_csr_read(__ireg);
72 #define imsic_read_switchcase_2(__ireg)			\
73 	imsic_read_switchcase(__ireg + 0)		\
74 	imsic_read_switchcase(__ireg + 1)
75 #define imsic_read_switchcase_4(__ireg)			\
76 	imsic_read_switchcase_2(__ireg + 0)		\
77 	imsic_read_switchcase_2(__ireg + 2)
78 #define imsic_read_switchcase_8(__ireg)			\
79 	imsic_read_switchcase_4(__ireg + 0)		\
80 	imsic_read_switchcase_4(__ireg + 4)
81 #define imsic_read_switchcase_16(__ireg)		\
82 	imsic_read_switchcase_8(__ireg + 0)		\
83 	imsic_read_switchcase_8(__ireg + 8)
84 #define imsic_read_switchcase_32(__ireg)		\
85 	imsic_read_switchcase_16(__ireg + 0)		\
86 	imsic_read_switchcase_16(__ireg + 16)
87 #define imsic_read_switchcase_64(__ireg)		\
88 	imsic_read_switchcase_32(__ireg + 0)		\
89 	imsic_read_switchcase_32(__ireg + 32)
/* Read one EIP/EIE register of the in-hardware VS-file by iselect number. */
91 static unsigned long imsic_eix_read(int ireg)
94 	imsic_read_switchcase_64(IMSIC_EIP0)
95 	imsic_read_switchcase_64(IMSIC_EIE0)
/*
 * Indirect VS-level CSR swap: select the register via CSR_VSISELECT, then
 * atomically exchange its value through CSR_VSIREG.
 * NOTE(review): statement-expression braces and switch/case scaffolding
 * appear elided in this extraction.
 */
101 #define imsic_vs_csr_swap(__c, __v)		\
104 	csr_write(CSR_VSISELECT, __c);		\
105 	__r = csr_swap(CSR_VSIREG, __v);	\
/* Same doubling case-label ladder as the read variant, for swaps. */
109 #define imsic_swap_switchcase(__ireg, __v)		\
111 		return imsic_vs_csr_swap(__ireg, __v);
112 #define imsic_swap_switchcase_2(__ireg, __v)		\
113 	imsic_swap_switchcase(__ireg + 0, __v)		\
114 	imsic_swap_switchcase(__ireg + 1, __v)
115 #define imsic_swap_switchcase_4(__ireg, __v)		\
116 	imsic_swap_switchcase_2(__ireg + 0, __v)	\
117 	imsic_swap_switchcase_2(__ireg + 2, __v)
118 #define imsic_swap_switchcase_8(__ireg, __v)		\
119 	imsic_swap_switchcase_4(__ireg + 0, __v)	\
120 	imsic_swap_switchcase_4(__ireg + 4, __v)
121 #define imsic_swap_switchcase_16(__ireg, __v)		\
122 	imsic_swap_switchcase_8(__ireg + 0, __v)	\
123 	imsic_swap_switchcase_8(__ireg + 8, __v)
124 #define imsic_swap_switchcase_32(__ireg, __v)		\
125 	imsic_swap_switchcase_16(__ireg + 0, __v)	\
126 	imsic_swap_switchcase_16(__ireg + 16, __v)
127 #define imsic_swap_switchcase_64(__ireg, __v)		\
128 	imsic_swap_switchcase_32(__ireg + 0, __v)	\
129 	imsic_swap_switchcase_32(__ireg + 32, __v)
/* Swap one EIP/EIE register of the in-hardware VS-file, returning old value. */
131 static unsigned long imsic_eix_swap(int ireg, unsigned long val)
134 	imsic_swap_switchcase_64(IMSIC_EIP0, val)
135 	imsic_swap_switchcase_64(IMSIC_EIE0, val)
/*
 * Indirect VS-level CSR write via the VSISELECT/VSIREG window.
 * NOTE(review): enclosing do/while or brace scaffolding and switch/case
 * labels appear elided in this extraction.
 */
141 #define imsic_vs_csr_write(__c, __v)	\
143 	csr_write(CSR_VSISELECT, __c);	\
144 	csr_write(CSR_VSIREG, __v);	\
/* Doubling case-label ladder for writes, mirroring the read/swap ladders. */
147 #define imsic_write_switchcase(__ireg, __v)		\
149 		imsic_vs_csr_write(__ireg, __v);	\
151 #define imsic_write_switchcase_2(__ireg, __v)		\
152 	imsic_write_switchcase(__ireg + 0, __v)		\
153 	imsic_write_switchcase(__ireg + 1, __v)
154 #define imsic_write_switchcase_4(__ireg, __v)		\
155 	imsic_write_switchcase_2(__ireg + 0, __v)	\
156 	imsic_write_switchcase_2(__ireg + 2, __v)
157 #define imsic_write_switchcase_8(__ireg, __v)		\
158 	imsic_write_switchcase_4(__ireg + 0, __v)	\
159 	imsic_write_switchcase_4(__ireg + 4, __v)
160 #define imsic_write_switchcase_16(__ireg, __v)		\
161 	imsic_write_switchcase_8(__ireg + 0, __v)	\
162 	imsic_write_switchcase_8(__ireg + 8, __v)
163 #define imsic_write_switchcase_32(__ireg, __v)		\
164 	imsic_write_switchcase_16(__ireg + 0, __v)	\
165 	imsic_write_switchcase_16(__ireg + 16, __v)
166 #define imsic_write_switchcase_64(__ireg, __v)		\
167 	imsic_write_switchcase_32(__ireg + 0, __v)	\
168 	imsic_write_switchcase_32(__ireg + 32, __v)
/* Write one EIP/EIE register of the in-hardware VS-file by iselect number. */
170 static void imsic_eix_write(int ireg, unsigned long val)
173 	imsic_write_switchcase_64(IMSIC_EIP0, val)
174 	imsic_write_switchcase_64(IMSIC_EIE0, val)
/*
 * Indirect VS-level CSR bit-set (read-modify-write OR) via the
 * VSISELECT/VSIREG window, used to merge pending/enable bits into a VS-file.
 * NOTE(review): brace scaffolding and switch/case labels appear elided.
 */
178 #define imsic_vs_csr_set(__c, __v)	\
180 	csr_write(CSR_VSISELECT, __c);	\
181 	csr_set(CSR_VSIREG, __v);	\
/* Doubling case-label ladder for bit-set, mirroring the other ladders. */
184 #define imsic_set_switchcase(__ireg, __v)	\
186 		imsic_vs_csr_set(__ireg, __v);	\
188 #define imsic_set_switchcase_2(__ireg, __v)	\
189 	imsic_set_switchcase(__ireg + 0, __v)	\
190 	imsic_set_switchcase(__ireg + 1, __v)
191 #define imsic_set_switchcase_4(__ireg, __v)	\
192 	imsic_set_switchcase_2(__ireg + 0, __v)	\
193 	imsic_set_switchcase_2(__ireg + 2, __v)
194 #define imsic_set_switchcase_8(__ireg, __v)	\
195 	imsic_set_switchcase_4(__ireg + 0, __v)	\
196 	imsic_set_switchcase_4(__ireg + 4, __v)
197 #define imsic_set_switchcase_16(__ireg, __v)	\
198 	imsic_set_switchcase_8(__ireg + 0, __v)	\
199 	imsic_set_switchcase_8(__ireg + 8, __v)
200 #define imsic_set_switchcase_32(__ireg, __v)	\
201 	imsic_set_switchcase_16(__ireg + 0, __v)	\
202 	imsic_set_switchcase_16(__ireg + 16, __v)
203 #define imsic_set_switchcase_64(__ireg, __v)	\
204 	imsic_set_switchcase_32(__ireg + 0, __v)	\
205 	imsic_set_switchcase_32(__ireg + 32, __v)
/* OR bits into one EIP/EIE register of the in-hardware VS-file. */
207 static void imsic_eix_set(int ireg, unsigned long val)
210 	imsic_set_switchcase_64(IMSIC_EIP0, val)
211 	imsic_set_switchcase_64(IMSIC_EIE0, val)
/*
 * Atomically update an MRIF word: new = (old & ~wr_mask) | (new_val & wr_mask),
 * returning the old value. Implemented with an LR/SC loop so concurrent
 * producers writing the SW-file see a consistent read-modify-write.
 * NOTE(review): the 'ptr' parameter declaration and parts of the asm template
 * (masking instructions, branch back to 0:, constraints) appear elided here.
 */
215 static unsigned long imsic_mrif_atomic_rmw(struct imsic_mrif *mrif,
217 					    unsigned long new_val,
218 					    unsigned long wr_mask)
220 	unsigned long old_val = 0, tmp = 0;
222 	__asm__ __volatile__ (
223 		"0:	lr.w.aq   %1, %0\n"
226 		"	sc.w.rl   %2, %2, %0\n"
228 		: "+A" (*ptr), "+r" (old_val), "+r" (tmp)
229 		: "r" (~wr_mask), "r" (new_val & wr_mask)
/*
 * Atomically OR bits into an MRIF word and return the previous value.
 * NOTE(review): parameter list tail and function braces appear elided.
 */
235 static unsigned long imsic_mrif_atomic_or(struct imsic_mrif *mrif,
239 	return atomic_long_fetch_or(val, (atomic_long_t *)ptr);
/* Full-word atomic write / read expressed through the rmw and or helpers. */
242 #define imsic_mrif_atomic_write(__mrif, __ptr, __new_val)	\
243 	imsic_mrif_atomic_rmw(__mrif, __ptr, __new_val, -1UL)
244 #define imsic_mrif_atomic_read(__mrif, __ptr)			\
245 	imsic_mrif_atomic_or(__mrif, __ptr, 0)
/*
 * Compute the topei value for an MRIF: scan pending-and-enabled bits below
 * the effective threshold and return the highest-priority (lowest-numbered)
 * interrupt as (id << TOPEI_ID_SHIFT) | id. Presumably returns 0 when no
 * interrupt qualifies — the final return appears elided; confirm upstream.
 */
247 static u32 imsic_mrif_topei(struct imsic_mrif *mrif, u32 nr_eix, u32 nr_msis)
249 	struct imsic_mrif_eix *eix;
250 	u32 i, imin, imax, ei, max_msi;
251 	unsigned long eipend[BITS_PER_TYPE(u64) / BITS_PER_LONG];
252 	unsigned long eithreshold = imsic_mrif_atomic_read(mrif,
	/* A zero eithreshold disables the threshold; otherwise it caps the scan */
255 	max_msi = (eithreshold && (eithreshold <= nr_msis)) ?
256 		   eithreshold : nr_msis;
257 	for (ei = 0; ei < nr_eix; ei++) {
258 		eix = &mrif->eix[ei];
		/* Candidate bits = enabled AND pending, read atomically */
259 		eipend[0] = imsic_mrif_atomic_read(mrif, &eix->eie[0]) &
260 			    imsic_mrif_atomic_read(mrif, &eix->eip[0]);
262 		eipend[1] = imsic_mrif_atomic_read(mrif, &eix->eie[1]) &
263 			    imsic_mrif_atomic_read(mrif, &eix->eip[1]);
264 		if (!eipend[0] && !eipend[1])
		/* Interrupt-ID window covered by this EIx pair */
270 		imin = ei * BITS_PER_TYPE(u64);
271 		imax = ((imin + BITS_PER_TYPE(u64)) < max_msi) ?
272 			imin + BITS_PER_TYPE(u64) : max_msi;
		/* Interrupt ID 0 is invalid, so start at 1 in the first window */
273 		for (i = (!imin) ? 1 : imin; i < imax; i++) {
274 			if (test_bit(i - imin, eipend))
275 				return (i << TOPEI_ID_SHIFT) | i;
/*
 * Validate an IMSIC iselect value: EIDELIVERY/EITHRESHOLD are always valid;
 * EIP0..EIP63 and EIE0..EIE63 are valid only while the register index falls
 * within the implemented nr_eix pairs. Default/odd-register checks appear
 * elided here; the visible tail rejects num/2 >= nr_eix.
 */
282 static int imsic_mrif_isel_check(u32 nr_eix, unsigned long isel)
287 	case IMSIC_EIDELIVERY:
288 	case IMSIC_EITHRESHOLD:
290 	case IMSIC_EIP0 ... IMSIC_EIP63:
291 		num = isel - IMSIC_EIP0;
293 	case IMSIC_EIE0 ... IMSIC_EIE63:
294 		num = isel - IMSIC_EIE0;
	/* Each EIx pair (even+odd register) maps to one imsic_mrif_eix entry */
303 	if ((num / 2) >= nr_eix)
/*
 * Read-modify-write one IMSIC register inside an MRIF.
 * EIDELIVERY writes are masked to bit 0; EITHRESHOLD writes are masked to
 * valid interrupt-ID range (IMSIC_MAX_ID - 1). EIP/EIE accesses select the
 * matching eix word, with bit 0 of EIP0/EIE0 kept read-only.
 * NOTE(review): the 'pend' flag assignment, RV32/RV64 word selection
 * conditionals, and the *val store/return appear elided here.
 */
309 static int imsic_mrif_rmw(struct imsic_mrif *mrif, u32 nr_eix,
310 			  unsigned long isel, unsigned long *val,
311 			  unsigned long new_val, unsigned long wr_mask)
314 	struct imsic_mrif_eix *eix;
315 	unsigned long *ei, num, old_val = 0;
318 	case IMSIC_EIDELIVERY:
319 		old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eidelivery,
320 						new_val, wr_mask & 0x1);
322 	case IMSIC_EITHRESHOLD:
323 		old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eithreshold,
324 				new_val, wr_mask & (IMSIC_MAX_ID - 1));
326 	case IMSIC_EIP0 ... IMSIC_EIP63:
327 	case IMSIC_EIE0 ... IMSIC_EIE63:
328 		if (isel >= IMSIC_EIP0 && isel <= IMSIC_EIP63) {
330 			num = isel - IMSIC_EIP0;
333 			num = isel - IMSIC_EIE0;
336 		if ((num / 2) >= nr_eix)
338 		eix = &mrif->eix[num / 2];
343 			ei = (pend) ? &eix->eip[0] : &eix->eie[0];
345 			ei = (pend) ? &eix->eip[num & 0x1] : &eix->eie[num & 0x1];
348 		/* Bit0 of EIP0 or EIE0 is read-only */
352 		old_val = imsic_mrif_atomic_rmw(mrif, ei, new_val, wr_mask);
/* IPI payload for reading a VS-file on the CPU that owns it. */
364 struct imsic_vsfile_read_data {
368 	struct imsic_mrif *mrif;
/*
 * Runs on the VS-file owner CPU (via on_each_cpu_mask): points
 * HSTATUS.VGEIN at the target guest-interrupt file, then copies every IMSIC
 * register into the caller's MRIF — swapping in zeroes when idata requests
 * a destructive read, plain reads otherwise. Restores HSTATUS/VSISELECT.
 * NOTE(review): the loop counter declaration, the 'clear' branch condition,
 * and eix assignments inside the loops appear elided here.
 */
371 static void imsic_vsfile_local_read(void *data)
374 	struct imsic_mrif_eix *eix;
375 	struct imsic_vsfile_read_data *idata = data;
376 	struct imsic_mrif *mrif = idata->mrif;
377 	unsigned long new_hstatus, old_hstatus, old_vsiselect;
379 	old_vsiselect = csr_read(CSR_VSISELECT);
380 	old_hstatus = csr_read(CSR_HSTATUS);
381 	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
382 	new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
383 	csr_write(CSR_HSTATUS, new_hstatus);
386 	 * We don't use imsic_mrif_atomic_xyz() functions to store
387 	 * values in MRIF because imsic_vsfile_read() is always called
388 	 * with pointer to temporary MRIF on stack.
	/* Destructive path: swap out registers, leaving the VS-file zeroed */
392 		mrif->eidelivery = imsic_vs_csr_swap(IMSIC_EIDELIVERY, 0);
393 		mrif->eithreshold = imsic_vs_csr_swap(IMSIC_EITHRESHOLD, 0);
394 		for (i = 0; i < idata->nr_eix; i++) {
396 			eix->eip[0] = imsic_eix_swap(IMSIC_EIP0 + i * 2, 0);
397 			eix->eie[0] = imsic_eix_swap(IMSIC_EIE0 + i * 2, 0);
399 			eix->eip[1] = imsic_eix_swap(IMSIC_EIP0 + i * 2 + 1, 0);
400 			eix->eie[1] = imsic_eix_swap(IMSIC_EIE0 + i * 2 + 1, 0);
	/* Non-destructive path: plain reads, VS-file state left intact */
404 		mrif->eidelivery = imsic_vs_csr_read(IMSIC_EIDELIVERY);
405 		mrif->eithreshold = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
406 		for (i = 0; i < idata->nr_eix; i++) {
408 			eix->eip[0] = imsic_eix_read(IMSIC_EIP0 + i * 2);
409 			eix->eie[0] = imsic_eix_read(IMSIC_EIE0 + i * 2);
411 			eix->eip[1] = imsic_eix_read(IMSIC_EIP0 + i * 2 + 1);
412 			eix->eie[1] = imsic_eix_read(IMSIC_EIE0 + i * 2 + 1);
417 	csr_write(CSR_HSTATUS, old_hstatus);
418 	csr_write(CSR_VSISELECT, old_vsiselect);
/*
 * Read (optionally read-and-clear) a VS-file into @mrif by dispatching
 * imsic_vsfile_local_read() to the CPU that owns the file. Bails out when
 * there is no valid VS-file (negative CPU or non-positive hgei).
 * NOTE(review): idata.clear/mrif field assignments appear elided here.
 */
421 static void imsic_vsfile_read(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
422 			      bool clear, struct imsic_mrif *mrif)
424 	struct imsic_vsfile_read_data idata;
426 	/* We can only read clear if we have a IMSIC VS-file */
427 	if (vsfile_cpu < 0 || vsfile_hgei <= 0)
430 	/* We can only read clear on local CPU */
431 	idata.hgei = vsfile_hgei;
432 	idata.nr_eix = nr_eix;
435 	on_each_cpu_mask(cpumask_of(vsfile_cpu),
436 			 imsic_vsfile_local_read, &idata, 1);
/* IPI payload for a single register read/write on the VS-file owner CPU. */
439 struct imsic_vsfile_rw_data {
/*
 * Runs on the VS-file owner CPU: temporarily points HSTATUS.VGEIN at the
 * target guest-interrupt file and performs one register access described
 * by idata (write idata->val, or read into idata->val).
 * NOTE(review): the 'write' flag checks, EIP/EIE word-index adjustment, and
 * default case appear elided; the visible odd-iselect check at 473 hints at
 * an RV32-related restriction — confirm upstream.
 */
446 static void imsic_vsfile_local_rw(void *data)
448 	struct imsic_vsfile_rw_data *idata = data;
449 	unsigned long new_hstatus, old_hstatus, old_vsiselect;
451 	old_vsiselect = csr_read(CSR_VSISELECT);
452 	old_hstatus = csr_read(CSR_HSTATUS);
453 	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
454 	new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
455 	csr_write(CSR_HSTATUS, new_hstatus);
457 	switch (idata->isel) {
458 	case IMSIC_EIDELIVERY:
460 			imsic_vs_csr_write(IMSIC_EIDELIVERY, idata->val);
462 			idata->val = imsic_vs_csr_read(IMSIC_EIDELIVERY);
464 	case IMSIC_EITHRESHOLD:
466 			imsic_vs_csr_write(IMSIC_EITHRESHOLD, idata->val);
468 			idata->val = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
470 	case IMSIC_EIP0 ... IMSIC_EIP63:
471 	case IMSIC_EIE0 ... IMSIC_EIE63:
473 		if (idata->isel & 0x1)
477 			imsic_eix_write(idata->isel, idata->val);
479 			idata->val = imsic_eix_read(idata->isel);
485 	csr_write(CSR_HSTATUS, old_hstatus);
486 	csr_write(CSR_VSISELECT, old_vsiselect);
/*
 * Access a single VS-file register from any CPU: validate iselect, then
 * dispatch imsic_vsfile_local_rw() to the owner CPU. Requires a valid
 * VS-file (non-negative CPU and positive hgei).
 * NOTE(review): rdata.isel/write assignments, the read-back of rdata.val
 * into *val, and the return appear elided here.
 */
489 static int imsic_vsfile_rw(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
490 			   unsigned long isel, bool write,
494 	struct imsic_vsfile_rw_data rdata;
496 	/* We can only access register if we have a IMSIC VS-file */
497 	if (vsfile_cpu < 0 || vsfile_hgei <= 0)
500 	/* Check IMSIC register iselect */
501 	rc = imsic_mrif_isel_check(nr_eix, isel);
505 	/* We can only access register on local CPU */
506 	rdata.hgei = vsfile_hgei;
509 	rdata.val = (write) ? *val : 0;
510 	on_each_cpu_mask(cpumask_of(vsfile_cpu),
511 			 imsic_vsfile_local_rw, &rdata, 1);
/*
 * Zero-out every register of a locally-owned VS-file: disable delivery,
 * clear the threshold, and wipe all EIP/EIE pairs (both the even and odd
 * word of each pair). Must run on the CPU owning the file; restores
 * HSTATUS and VSISELECT on exit.
 */
519 static void imsic_vsfile_local_clear(int vsfile_hgei, u32 nr_eix)
522 	unsigned long new_hstatus, old_hstatus, old_vsiselect;
524 	/* We can only zero-out if we have a IMSIC VS-file */
525 	if (vsfile_hgei <= 0)
528 	old_vsiselect = csr_read(CSR_VSISELECT);
529 	old_hstatus = csr_read(CSR_HSTATUS);
530 	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
531 	new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
532 	csr_write(CSR_HSTATUS, new_hstatus);
534 	imsic_vs_csr_write(IMSIC_EIDELIVERY, 0);
535 	imsic_vs_csr_write(IMSIC_EITHRESHOLD, 0);
536 	for (i = 0; i < nr_eix; i++) {
537 		imsic_eix_write(IMSIC_EIP0 + i * 2, 0);
538 		imsic_eix_write(IMSIC_EIE0 + i * 2, 0);
540 		imsic_eix_write(IMSIC_EIP0 + i * 2 + 1, 0);
541 		imsic_eix_write(IMSIC_EIE0 + i * 2 + 1, 0);
545 	csr_write(CSR_HSTATUS, old_hstatus);
546 	csr_write(CSR_VSISELECT, old_vsiselect);
/*
 * Merge an MRIF into a locally-owned VS-file: OR each eip/eie word into the
 * hardware registers (imsic_eix_set), then program eithreshold and finally
 * eidelivery — delivery is enabled last so no interrupt fires before the
 * full state is in place. Restores HSTATUS and VSISELECT on exit.
 * NOTE(review): the loop counter declaration and per-iteration eix
 * assignment appear elided here.
 */
549 static void imsic_vsfile_local_update(int vsfile_hgei, u32 nr_eix,
550 				      struct imsic_mrif *mrif)
553 	struct imsic_mrif_eix *eix;
554 	unsigned long new_hstatus, old_hstatus, old_vsiselect;
556 	/* We can only update if we have a HW IMSIC context */
557 	if (vsfile_hgei <= 0)
561 	 * We don't use imsic_mrif_atomic_xyz() functions to read values
562 	 * from MRIF in this function because it is always called with
563 	 * pointer to temporary MRIF on stack.
566 	old_vsiselect = csr_read(CSR_VSISELECT);
567 	old_hstatus = csr_read(CSR_HSTATUS);
568 	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
569 	new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
570 	csr_write(CSR_HSTATUS, new_hstatus);
572 	for (i = 0; i < nr_eix; i++) {
574 		imsic_eix_set(IMSIC_EIP0 + i * 2, eix->eip[0]);
575 		imsic_eix_set(IMSIC_EIE0 + i * 2, eix->eie[0]);
577 		imsic_eix_set(IMSIC_EIP0 + i * 2 + 1, eix->eip[1]);
578 		imsic_eix_set(IMSIC_EIE0 + i * 2 + 1, eix->eie[1]);
581 	imsic_vs_csr_write(IMSIC_EITHRESHOLD, mrif->eithreshold);
582 	imsic_vs_csr_write(IMSIC_EIDELIVERY, mrif->eidelivery);
584 	csr_write(CSR_HSTATUS, old_hstatus);
585 	csr_write(CSR_VSISELECT, old_vsiselect);
/*
 * Tear down a vCPU's VS-file association: under the vsfile write lock,
 * capture and invalidate the file details, then zero the SW-file and free
 * the old host guest-external-interrupt (hgei) line if one was held.
 */
588 static void imsic_vsfile_cleanup(struct imsic *imsic)
590 	int old_vsfile_hgei, old_vsfile_cpu;
594 	 * We don't use imsic_mrif_atomic_xyz() functions to clear the
595 	 * SW-file in this function because it is always called when the
596 	 * VCPU is being destroyed.
599 	write_lock_irqsave(&imsic->vsfile_lock, flags);
600 	old_vsfile_hgei = imsic->vsfile_hgei;
601 	old_vsfile_cpu = imsic->vsfile_cpu;
602 	imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
603 	imsic->vsfile_va = NULL;
604 	imsic->vsfile_pa = 0;
605 	write_unlock_irqrestore(&imsic->vsfile_lock, flags);
607 	memset(imsic->swfile, 0, sizeof(*imsic->swfile));
609 	if (old_vsfile_cpu >= 0)
610 		kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
/*
 * Re-evaluate the SW-file and raise or clear the VS-level external
 * interrupt accordingly: IRQ_VS_EXT is set when delivery is enabled and
 * topei reports a pending+enabled interrupt, cleared otherwise.
 */
613 static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
615 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
616 	struct imsic_mrif *mrif = imsic->swfile;
620 	 * The critical section is necessary during external interrupt
621 	 * updates to avoid the risk of losing interrupts due to potential
622 	 * interruptions between reading topei and updating pending status.
625 	spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
627 	if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
628 	    imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
629 		kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
631 		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
633 	spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
/*
 * Copy the SW-file into a caller-provided MRIF; when @clear is set
 * (branching appears elided here) the SW-file is additionally zeroed and
 * the VS external interrupt is dropped.
 */
636 static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
637 			      struct imsic_mrif *mrif)
639 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
642 	 * We don't use imsic_mrif_atomic_xyz() functions to read and
643 	 * write SW-file and MRIF in this function because it is always
644 	 * called when VCPU is not using SW-file and the MRIF points to
645 	 * a temporary MRIF on stack.
648 	memcpy(mrif, imsic->swfile, sizeof(*mrif));
650 		memset(imsic->swfile, 0, sizeof(*imsic->swfile));
651 		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
/*
 * Merge an MRIF into the SW-file: overwrite eidelivery/eithreshold, OR in
 * every eip/eie word (never losing pending bits set concurrently by
 * producers), then re-evaluate the VS external interrupt line.
 * NOTE(review): the loop counter declaration and the per-iteration
 * "eix = &mrif->eix[i]" assignment appear elided here.
 */
655 static void imsic_swfile_update(struct kvm_vcpu *vcpu,
656 				struct imsic_mrif *mrif)
659 	struct imsic_mrif_eix *seix, *eix;
660 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
661 	struct imsic_mrif *smrif = imsic->swfile;
663 	imsic_mrif_atomic_write(smrif, &smrif->eidelivery, mrif->eidelivery);
664 	imsic_mrif_atomic_write(smrif, &smrif->eithreshold, mrif->eithreshold);
665 	for (i = 0; i < imsic->nr_eix; i++) {
666 		seix = &smrif->eix[i];
668 		imsic_mrif_atomic_or(smrif, &seix->eip[0], eix->eip[0]);
669 		imsic_mrif_atomic_or(smrif, &seix->eie[0], eix->eie[0]);
671 		imsic_mrif_atomic_or(smrif, &seix->eip[1], eix->eip[1]);
672 		imsic_mrif_atomic_or(smrif, &seix->eie[1], eix->eie[1]);
676 	imsic_swfile_extirq_update(vcpu);
/*
 * Release a vCPU's hardware VS-file: detach it from the IMSIC context under
 * the write lock, unmap the guest's G-stage IMSIC page so producers fault
 * instead of writing stale hardware, move the register state (read-and-clear)
 * from the old VS-file into the SW-file, and free the hgei line.
 */
679 void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
682 	struct imsic_mrif tmrif;
683 	int old_vsfile_hgei, old_vsfile_cpu;
684 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
686 	/* Read and clear IMSIC VS-file details */
687 	write_lock_irqsave(&imsic->vsfile_lock, flags);
688 	old_vsfile_hgei = imsic->vsfile_hgei;
689 	old_vsfile_cpu = imsic->vsfile_cpu;
690 	imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
691 	imsic->vsfile_va = NULL;
692 	imsic->vsfile_pa = 0;
693 	write_unlock_irqrestore(&imsic->vsfile_lock, flags);
695 	/* Do nothing, if no IMSIC VS-file to release */
696 	if (old_vsfile_cpu < 0)
700 	 * At this point, all interrupt producers are still using
701 	 * the old IMSIC VS-file so we first re-direct all interrupt
705 	/* Purge the G-stage mapping */
706 	kvm_riscv_gstage_iounmap(vcpu->kvm,
707 				 vcpu->arch.aia_context.imsic_addr,
710 	/* TODO: Purge the IOMMU mapping ??? */
713 	 * At this point, all interrupt producers have been re-directed
714 	 * to somewhere else so we move register state from the old IMSIC
715 	 * VS-file to the IMSIC SW-file.
718 	/* Read and clear register state from old IMSIC VS-file */
719 	memset(&tmrif, 0, sizeof(tmrif));
720 	imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
723 	/* Update register state in IMSIC SW-file */
724 	imsic_swfile_update(vcpu, &tmrif);
726 	/* Free-up old IMSIC VS-file */
727 	kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
/*
 * Migrate a vCPU's IMSIC state to the CPU it is about to run on.
 * Flow: no-op in pure-emulation mode or when the VS-file already lives on
 * this CPU; otherwise allocate a new hgei/VS-file here (on failure:
 * hard-fail the run in HWACCEL mode, or fall back to SW-file emulation by
 * releasing the old VS-file in AUTO mode), zero the new file, remap the
 * guest's G-stage IMSIC page to it, publish the new details under the
 * write lock, transfer register state from the old VS-file or SW-file,
 * and finally point HSTATUS.VGEIN at the new file.
 * NOTE(review): several returns and the allocation-failure branch
 * condition appear elided in this extraction.
 */
730 int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
733 	phys_addr_t new_vsfile_pa;
734 	struct imsic_mrif tmrif;
735 	void __iomem *new_vsfile_va;
736 	struct kvm *kvm = vcpu->kvm;
737 	struct kvm_run *run = vcpu->run;
738 	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
739 	struct imsic *imsic = vaia->imsic_state;
740 	int ret = 0, new_vsfile_hgei = -1, old_vsfile_hgei, old_vsfile_cpu;
742 	/* Do nothing for emulation mode */
743 	if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL)
746 	/* Read old IMSIC VS-file details */
747 	read_lock_irqsave(&imsic->vsfile_lock, flags);
748 	old_vsfile_hgei = imsic->vsfile_hgei;
749 	old_vsfile_cpu = imsic->vsfile_cpu;
750 	read_unlock_irqrestore(&imsic->vsfile_lock, flags);
752 	/* Do nothing if we are continuing on same CPU */
753 	if (old_vsfile_cpu == vcpu->cpu)
756 	/* Allocate new IMSIC VS-file */
757 	ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu,
758 				       &new_vsfile_va, &new_vsfile_pa);
760 		/* For HW acceleration mode, we can't continue */
761 		if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) {
762 			run->fail_entry.hardware_entry_failure_reason =
764 			run->fail_entry.cpu = vcpu->cpu;
765 			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
769 		/* Release old IMSIC VS-file */
770 		if (old_vsfile_cpu >= 0)
771 			kvm_riscv_vcpu_aia_imsic_release(vcpu);
773 		/* For automatic mode, we continue */
776 	new_vsfile_hgei = ret;
779 	 * At this point, all interrupt producers are still using
780 	 * to the old IMSIC VS-file so we first move all interrupt
781 	 * producers to the new IMSIC VS-file.
784 	/* Zero-out new IMSIC VS-file */
785 	imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
787 	/* Update G-stage mapping for the new IMSIC VS-file */
788 	ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
789 				       new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
792 		goto fail_free_vsfile_hgei;
794 	/* TODO: Update the IOMMU mapping ??? */
796 	/* Update new IMSIC VS-file details in IMSIC context */
797 	write_lock_irqsave(&imsic->vsfile_lock, flags);
798 	imsic->vsfile_hgei = new_vsfile_hgei;
799 	imsic->vsfile_cpu = vcpu->cpu;
800 	imsic->vsfile_va = new_vsfile_va;
801 	imsic->vsfile_pa = new_vsfile_pa;
802 	write_unlock_irqrestore(&imsic->vsfile_lock, flags);
805 	 * At this point, all interrupt producers have been moved
806 	 * to the new IMSIC VS-file so we move register state from
807 	 * the old IMSIC VS/SW-file to the new IMSIC VS-file.
810 	memset(&tmrif, 0, sizeof(tmrif));
811 	if (old_vsfile_cpu >= 0) {
812 		/* Read and clear register state from old IMSIC VS-file */
813 		imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu,
814 				  imsic->nr_hw_eix, true, &tmrif);
816 		/* Free-up old IMSIC VS-file */
817 		kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
819 		/* Read and clear register state from IMSIC SW-file */
820 		imsic_swfile_read(vcpu, true, &tmrif);
823 	/* Restore register state in the new IMSIC VS-file */
824 	imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);
827 	/* Set VCPU HSTATUS.VGEIN to new IMSIC VS-file */
828 	vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;
829 	if (new_vsfile_hgei > 0)
830 		vcpu->arch.guest_context.hstatus |=
831 			((unsigned long)new_vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
833 	/* Continue run-loop */
836 fail_free_vsfile_hgei:
837 	kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei);
/*
 * Handle a trapped guest CSR access to the (software-emulated) IMSIC.
 * TOPEI is handled inline: read the highest pending+enabled interrupt and,
 * on a masked write, claim it by clearing its eip bit. All other iselect
 * values go through imsic_mrif_rmw(); -ENOENT maps to "forward to
 * user-space" (rc 0), other errors to an illegal-instruction trap.
 * Ends by re-evaluating the VS external interrupt line.
 * NOTE(review): the topei variable declaration, the *val store, and the
 * division on line 861 (presumably by BITS_PER_TYPE(u64)) appear elided.
 */
841 int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
842 				 unsigned long *val, unsigned long new_val,
843 				 unsigned long wr_mask)
846 	struct imsic_mrif_eix *eix;
847 	int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
848 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
850 	if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
851 		/* Read pending and enabled interrupt with highest priority */
852 		topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
857 		/* Writes ignore value and clear top pending interrupt */
858 		if (topei && wr_mask) {
859 			topei >>= TOPEI_ID_SHIFT;
861 			eix = &imsic->swfile->eix[topei /
863 			clear_bit(topei & (BITS_PER_TYPE(u64) - 1),
868 		r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel,
869 				   val, new_val, wr_mask);
870 		/* Forward unknown IMSIC register to user-space */
872 			rc = (r == -ENOENT) ? 0 : KVM_INSN_ILLEGAL_TRAP;
876 		imsic_swfile_extirq_update(vcpu);
/*
 * Device-attribute access to a vCPU's IMSIC register from user-space.
 * Decodes the target vCPU and iselect from @type; while holding the vsfile
 * read lock, writes go into the SW-file (followed by an extirq update) when
 * no VS-file is resident, reads come from the SW-file likewise. When a
 * VS-file is resident, the access is instead performed on the hardware file
 * via imsic_vsfile_rw() after dropping the lock.
 * NOTE(review): error returns for uninitialized AIA / missing vCPU, the
 * write-vs-read branch condition, and some rmw arguments appear elided.
 */
881 int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
882 				bool write, unsigned long *val)
887 	struct kvm_vcpu *vcpu;
888 	int rc, vsfile_hgei, vsfile_cpu;
890 	if (!kvm_riscv_aia_initialized(kvm))
893 	vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
894 	vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
898 	isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
899 	imsic = vcpu->arch.aia_context.imsic_state;
901 	read_lock_irqsave(&imsic->vsfile_lock, flags);
904 	vsfile_hgei = imsic->vsfile_hgei;
905 	vsfile_cpu = imsic->vsfile_cpu;
906 	if (vsfile_cpu < 0) {
908 			rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
909 					    isel, NULL, *val, -1UL);
910 			imsic_swfile_extirq_update(vcpu);
912 			rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
916 	read_unlock_irqrestore(&imsic->vsfile_lock, flags);
918 	if (!rc && vsfile_cpu >= 0)
919 		rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
/*
 * Check whether a device-attribute @type refers to a valid vCPU IMSIC
 * register: the vCPU must exist and the encoded iselect must pass
 * imsic_mrif_isel_check() for that vCPU's implemented EIx range.
 * NOTE(review): error returns for uninitialized AIA / missing vCPU
 * appear elided here.
 */
925 int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type)
929 	struct kvm_vcpu *vcpu;
931 	if (!kvm_riscv_aia_initialized(kvm))
934 	vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
935 	vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
939 	isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
940 	imsic = vcpu->arch.aia_context.imsic_state;
941 	return imsic_mrif_isel_check(imsic->nr_eix, isel);
/*
 * Reset a vCPU's IMSIC: release any hardware VS-file and zero the SW-file.
 * NOTE(review): a NULL-imsic guard likely precedes the release call but
 * appears elided here.
 */
944 void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu)
946 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
951 	kvm_riscv_vcpu_aia_imsic_release(vcpu);
953 	memset(imsic->swfile, 0, sizeof(*imsic->swfile));
/*
 * Inject an MSI into a vCPU's IMSIC. Only guest_index 0 and the
 * SETIPNUM_LE/SETIPNUM_BE offsets of the single emulated MMIO page are
 * accepted; BE writes are byte-swapped to get the interrupt ID, which must
 * be non-zero and below nr_msis. With a resident VS-file the ID is written
 * straight to the hardware SETIPNUM_LE register; otherwise the eip bit is
 * set in the SW-file and the VS external interrupt is re-evaluated.
 */
956 int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
957 				    u32 guest_index, u32 offset, u32 iid)
960 	struct imsic_mrif_eix *eix;
961 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
963 	/* We only emulate one IMSIC MMIO page for each Guest VCPU */
964 	if (!imsic || !iid || guest_index ||
965 	    (offset != IMSIC_MMIO_SETIPNUM_LE &&
966 	     offset != IMSIC_MMIO_SETIPNUM_BE))
969 	iid = (offset == IMSIC_MMIO_SETIPNUM_BE) ? __swab32(iid) : iid;
970 	if (imsic->nr_msis <= iid)
973 	read_lock_irqsave(&imsic->vsfile_lock, flags);
975 	if (imsic->vsfile_cpu >= 0) {
976 		writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
979 		eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
980 		set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
981 		imsic_swfile_extirq_update(vcpu);
984 	read_unlock_irqrestore(&imsic->vsfile_lock, flags);
/*
 * MMIO read handler for the emulated IMSIC page: only aligned 4-byte
 * accesses are accepted. Success path (presumably zero-filling *val and
 * returning 0) appears elided here — confirm upstream.
 */
989 static int imsic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
990 			   gpa_t addr, int len, void *val)
992 	if (len != 4 || (addr & 0x3) != 0)
/*
 * MMIO write handler for the emulated IMSIC page: accepts only aligned
 * 4-byte writes, then re-packages the access as a kvm_msi (address split
 * into hi/lo halves, written word as data) and routes it through the
 * common AIA MSI-injection path.
 */
1000 static int imsic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
1001 			    gpa_t addr, int len, const void *val)
1003 	struct kvm_msi msi = { 0 };
1005 	if (len != 4 || (addr & 0x3) != 0)
1008 	msi.address_hi = addr >> 32;
1009 	msi.address_lo = (u32)addr;
1010 	msi.data = *((const u32 *)val);
1011 	kvm_riscv_aia_inject_msi(vcpu->kvm, &msi);
/* KVM IO-bus callbacks for the emulated per-vCPU IMSIC MMIO page. */
1016 static struct kvm_io_device_ops imsic_iodoev_ops = {
1017 	.read = imsic_mmio_read,
1018 	.write = imsic_mmio_write,
/*
 * Allocate and wire up a vCPU's IMSIC context: size the MSI/EIx counts from
 * the VM's configured nr_ids, allocate a zeroed SW-file (page allocation so
 * a physical address is available for producers), and register the MMIO
 * device on the KVM_MMIO_BUS at the guest IMSIC address. Error paths free
 * the SW-file pages and the context in reverse order.
 * NOTE(review): the kzalloc NULL check, swfile_page NULL check, ret
 * assignments, and final returns appear elided here.
 */
1021 int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
1024 	struct imsic *imsic;
1025 	struct page *swfile_page;
1026 	struct kvm *kvm = vcpu->kvm;
1028 	/* Fail if we have zero IDs */
1029 	if (!kvm->arch.aia.nr_ids)
1032 	/* Allocate IMSIC context */
1033 	imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
1036 	vcpu->arch.aia_context.imsic_state = imsic;
1038 	/* Setup IMSIC context */
1039 	imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
1040 	rwlock_init(&imsic->vsfile_lock);
1041 	imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
1042 	imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
1043 	imsic->vsfile_hgei = imsic->vsfile_cpu = -1;
1045 	/* Setup IMSIC SW-file */
1046 	swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
1047 				  get_order(sizeof(*imsic->swfile)));
1050 		goto fail_free_imsic;
1052 	imsic->swfile = page_to_virt(swfile_page);
1053 	imsic->swfile_pa = page_to_phys(swfile_page);
1054 	spin_lock_init(&imsic->swfile_extirq_lock);
1056 	/* Setup IO device */
1057 	kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
1058 	mutex_lock(&kvm->slots_lock);
1059 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
1060 				      vcpu->arch.aia_context.imsic_addr,
1061 				      KVM_DEV_RISCV_IMSIC_SIZE,
1063 	mutex_unlock(&kvm->slots_lock);
1065 		goto fail_free_swfile;
1070 	free_pages((unsigned long)imsic->swfile,
1071 		   get_order(sizeof(*imsic->swfile)));
1073 	vcpu->arch.aia_context.imsic_state = NULL;
/*
 * Destroy a vCPU's IMSIC context: release any VS-file, unregister the MMIO
 * device, free the SW-file pages, and clear the context pointer.
 * NOTE(review): a NULL-imsic guard and the final kfree(imsic) likely exist
 * upstream but appear elided in this extraction.
 */
1078 void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu)
1080 	struct kvm *kvm = vcpu->kvm;
1081 	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
1086 	imsic_vsfile_cleanup(imsic);
1088 	mutex_lock(&kvm->slots_lock);
1089 	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
1090 	mutex_unlock(&kvm->slots_lock);
1092 	free_pages((unsigned long)imsic->swfile,
1093 		   get_order(sizeof(*imsic->swfile)));
1095 	vcpu->arch.aia_context.imsic_state = NULL;