// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */
6 #include <linux/kvm_host.h>
7 #include <asm/kvm_csr.h>
8 #include <asm/kvm_vcpu.h>
11 * ktime_to_tick() - Scale ktime_t to timer tick value.
13 static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
17 delta = ktime_to_ns(now);
18 return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
21 static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
23 return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
27 * Push timer forward on timeout.
28 * Handle an hrtimer event by push the hrtimer forward a period.
30 static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
32 unsigned long cfg, period;
34 /* Add periodic tick to current expire time */
35 cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
36 if (cfg & CSR_TCFG_PERIOD) {
37 period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
38 hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
39 return HRTIMER_RESTART;
41 return HRTIMER_NORESTART;
44 /* Low level hrtimer wake routine */
45 enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
47 struct kvm_vcpu *vcpu;
49 vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
50 kvm_queue_irq(vcpu, INT_TI);
51 rcuwait_wake_up(&vcpu->wait);
53 return kvm_count_timeout(vcpu);
57 * Initialise the timer to the specified frequency, zero it
59 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
61 vcpu->arch.timer_mhz = timer_hz >> 20;
64 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
68 * Restore hard timer state and enable guest to access timer registers
69 * without trap, should be called with irq disabled
71 void kvm_acquire_timer(struct kvm_vcpu *vcpu)
75 cfg = read_csr_gcfg();
76 if (!(cfg & CSR_GCFG_TIT))
79 /* Enable guest access to hard timer */
80 write_csr_gcfg(cfg & ~CSR_GCFG_TIT);
83 * Freeze the soft-timer and sync the guest stable timer with it. We do
84 * this with interrupts disabled to avoid latency.
86 hrtimer_cancel(&vcpu->arch.swtimer);
90 * Restore soft timer state from saved context.
92 void kvm_restore_timer(struct kvm_vcpu *vcpu)
94 unsigned long cfg, delta, period;
96 struct loongarch_csrs *csr = vcpu->arch.csr;
99 * Set guest stable timer cfg csr
101 cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
102 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
103 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
104 if (!(cfg & CSR_TCFG_EN)) {
105 /* Guest timer is disabled, just restore timer registers */
106 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
111 * Set remainder tick value if not expired
114 expire = vcpu->arch.expire;
115 if (ktime_before(now, expire))
116 delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
118 if (cfg & CSR_TCFG_PERIOD) {
119 period = cfg & CSR_TCFG_VAL;
120 delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
121 delta = period - (delta % period);
125 * Inject timer here though sw timer should inject timer
126 * interrupt async already, since sw timer may be cancelled
127 * during injecting intr async in function kvm_acquire_timer
129 kvm_queue_irq(vcpu, INT_TI);
132 write_gcsr_timertick(delta);
136 * Save guest timer state and switch to software emulation of guest
137 * timer. The hard timer must already be in use, so preemption should be
140 static void _kvm_save_timer(struct kvm_vcpu *vcpu)
142 unsigned long ticks, delta;
144 struct loongarch_csrs *csr = vcpu->arch.csr;
146 ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
147 delta = tick_to_ns(vcpu, ticks);
148 expire = ktime_add_ns(ktime_get(), delta);
149 vcpu->arch.expire = expire;
152 * Update hrtimer to use new timeout
153 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
154 * the same physical cpu in next time
156 hrtimer_cancel(&vcpu->arch.swtimer);
157 hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
160 * Inject timer interrupt so that hall polling can dectect and exit
162 kvm_queue_irq(vcpu, INT_TI);
166 * Save guest timer state and switch to soft guest timer if hard timer was in
169 void kvm_save_timer(struct kvm_vcpu *vcpu)
172 struct loongarch_csrs *csr = vcpu->arch.csr;
175 cfg = read_csr_gcfg();
176 if (!(cfg & CSR_GCFG_TIT)) {
177 /* Disable guest use of hard timer */
178 write_csr_gcfg(cfg | CSR_GCFG_TIT);
180 /* Save hard timer state */
181 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
182 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
183 if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
184 _kvm_save_timer(vcpu);
187 /* Save timer-related state to vCPU context */
188 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
192 void kvm_reset_timer(struct kvm_vcpu *vcpu)
194 write_gcsr_timercfg(0);
195 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
196 hrtimer_cancel(&vcpu->arch.swtimer);