arch/loongarch/kvm/vcpu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	return RESUME_GUEST;
}

/*
 * Check and handle pending signals and vCPU requests etc.
 * Run with irqs enabled and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *             indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	ret = kvm_check_requests(vcpu);

	return ret;
}

/*
 * Called with irqs enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, with irqs disabled
 *         Anything else if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Handle the vCPU timer, deliver pending interrupts and
		 * exceptions, and check the vpid before entering the guest
		 */
		local_irq_disable();
		kvm_acquire_timer(vcpu);
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
		kvm_check_vpid(vcpu);
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
		/* Clear KVM_LARCH_SWCSR_LATEST as the CSRs will change while the guest runs */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			/* make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}

/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & 0x1fff; /* Ignore NMI */
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_disable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}

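/*
 * A vCPU is runnable when it has a pending interrupt and its mp_state is
 * KVM_MP_STATE_RUNNABLE.
 */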
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

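/* Only send a kick (IPI) if the target vCPU is currently running guest code */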
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

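/* A timer interrupt is pending if the soft timer expired or guest ESTAT.TI is set */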
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

/**
 * kvm_migrate_count() - Migrate timer.
 * @vcpu:       Virtual CPU.
 *
 * Migrate hrtimer to the current CPU by cancelling and restarting it
 * if the hrtimer is active.
 *
 * Must be called when the vCPU is migrated to a different CPU, so that
 * the timer can interrupt the guest at the new CPU, and the timer irq can
 * be delivered to the vCPU.
 */
static void kvm_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.swtimer))
		hrtimer_restart(&vcpu->arch.swtimer);
}

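/*
 * Read one guest CSR from the software copy; ESTAT is special-cased so that
 * the IP0~IP7 bits injected via GINTC are reflected in the returned value.
 */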
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 bits are read from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

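/*
 * Write one guest CSR to the software copy; for ESTAT the IP0~IP7 bits are
 * routed to GINTC, which is used to inject them into the guest.
 */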
static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	int ret = 0, gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 bits are injected through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return ret;
	}

	kvm_write_sw_gcsr(csr, id, val);

	return ret;
}

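/* Read one register (CSR, CPUCFG or KVM-specific) for the KVM_GET_ONE_REG ioctl */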
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			*v = drdtime() + vcpu->kvm->arch.time_offset;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

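/* Write one register (CSR, CPUCFG or KVM-specific) for the KVM_SET_ONE_REG ioctl */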
static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
			const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			vcpu->arch.cpucfg[id] = (u32)v;
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * The counter offset is board-level state, not per-vCPU;
			 * only set it once (via vCPU 0) on an SMP system
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
			kvm_reset_timer(vcpu);
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default, will support LSX/LASX later. */
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only software CSR state should be modified here.
	 *
	 * If any hardware CSR register were modified, a vcpu_load/vcpu_put
	 * pair would be needed: the hardware CSRs are owned by the currently
	 * loaded vCPU, so other vCPUs would have to reload them on a switch.
	 *
	 * When software CSR state is modified, the KVM_LARCH_HWCSR_USABLE bit
	 * must be cleared in vcpu->arch.aux_inuse, so that vcpu_load checks
	 * the flag and reloads the CSR registers from the software copy.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/* Enable FPU */
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

	preempt_enable();
}

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}

	preempt_enable();
}

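/* Queue (positive irq number) or dequeue (negative irq number) a guest interrupt */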
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}

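/* Async ioctls are dispatched before the vCPU mutex is taken; only KVM_INTERRUPT is handled here */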
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

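/* Allocate and initialize per-vCPU state: soft timer, CSR copy and architectural reset values */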
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;

	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All kvm exceptions share one exception entry, and a host <-> guest
	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS
	 * value here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Set the initial guest CRMD: direct address (DA) translation mode */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

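/* Free per-vCPU state and drop any stale last_vcpu references on all CPUs */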
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}

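/*
 * Called with irqs disabled from kvm_arch_vcpu_load(): restore the guest
 * timer and, if the hardware CSR state is stale, reload all guest CSRs.
 */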
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
	context->last_vcpu = vcpu;

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);

	/* Control guest page CCA attribute */
	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	/* Restore Root.GINTC from unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next vCPU from succeeding by matching a LL on
	 * the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM vCPU[%d] switch\n",
				vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_migrate_count(vcpu);
	}

	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}

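/*
 * Called with irqs disabled from kvm_arch_vcpu_put(): save the FPU and timer
 * state, and snapshot the hardware guest CSRs into the software copy.
 */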
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update the software CSR state from hardware if it is stale. Most
	 * CSR registers are unchanged across a process context switch, apart
	 * from registers such as the remaining timer tick value and the
	 * injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu;
	unsigned long flags;

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}

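/*
 * KVM_RUN entry point: complete any pending MMIO/IOCSR read, then enter the
 * guest via kvm_pre_enter_guest() and kvm_loongarch_ops->enter_guest().
 */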
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
	}

	if (run->immediate_exit)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);
	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * The guest exit was already recorded in kvm_handle_exit(); the
	 * return value here must not be RESUME_GUEST
	 */
	local_irq_enable();
out:
	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);

	return r;
}