// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>

/*
 * ktime_to_tick() - Scale ktime_t to timer tick value.
 */
static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
{
	u64 delta;

	delta = ktime_to_ns(now);
	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
}

static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
{
	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}
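
/*
 * Worked example for the two helpers above (illustrative numbers,
 * assuming MNSEC_PER_SEC is NSEC_PER_SEC pre-scaled by 2^20, the same
 * scaling kvm_init_timer() applies to the counter frequency):
 *
 *	ticks = ns * timer_mhz / MNSEC_PER_SEC
 *	      ~= ns * timer_hz / NSEC_PER_SEC
 *
 * so with a counter near 100 MHz (timer_mhz = 95), 1000000 ns maps to
 * roughly 100000 ticks, and tick_to_ns() performs the inverse mapping.
 * Pre-scaling both factors by 2^20 keeps the 64-bit products from
 * overflowing for realistic frequencies and time spans.
 */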

/*
 * Push the timer forward on timeout.
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 */
static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, period;

	/* Add the periodic tick to the current expire time */
	cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
	if (cfg & CSR_TCFG_PERIOD) {
		period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
		hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}

/* Low level hrtimer wake routine */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
	kvm_queue_irq(vcpu, INT_TI);
	rcuwait_wake_up(&vcpu->wait);

	return kvm_count_timeout(vcpu);
}

/*
 * Initialise the timer to the specified frequency and zero it
 */
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
{
	/* Scale the full frequency down by 2^20 to match MNSEC_PER_SEC */
	vcpu->arch.timer_mhz = timer_hz >> 20;

	/* Starting at 0 */
	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
}
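
/*
 * Example with an assumed, illustrative counter frequency: for
 * timer_hz = 104857600 (exactly 100 << 20), the shift above yields
 * timer_mhz = 100, and ktime_to_tick()/tick_to_ns() then convert
 * against the identically scaled MNSEC_PER_SEC, so 1 ms comes out to
 * roughly 105000 ticks. Frequencies that are not an exact multiple
 * of 2^20 truncate slightly in the shift.
 */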

/*
 * Restore soft timer state from saved context.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, estat;
	unsigned long ticks, delta, period;
	ktime_t expire, now;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Set the guest stable timer cfg CSR.
	 * Disable the timer before restoring the ESTAT CSR so that a
	 * stale timer configuration cannot raise a spurious timer
	 * interrupt.
	 */
	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);

	write_gcsr_timercfg(0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	if (!(cfg & CSR_TCFG_EN)) {
		/* The guest timer is disabled, just restore the timer registers */
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		return;
	}

	/*
	 * Freeze the soft timer and sync the guest stable timer with it.
	 */
	hrtimer_cancel(&vcpu->arch.swtimer);

	/*
	 * From the LoongArch Reference Manual, Volume 1, Chapter 7.6.2:
	 * once a one-shot timer has fired, CSR.TVAL reads as -1. Two
	 * situations can leave it in that state:
	 * 1) the timer fired while exiting to the host;
	 * 2) the timer fired, the VM handled the timer IRQ, and then the
	 *    VM exited to the host. The host must not inject the timer
	 *    IRQ again, to avoid a spurious timer interrupt.
	 */
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
	estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
		/*
		 * Writing 0 to LOONGARCH_CSR_TVAL injects a timer IRQ
		 * and sets CSR.TVAL to -1.
		 */
		write_gcsr_timertick(0);

		/*
		 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR clears
		 * the timer interrupt while CSR.TVAL stays unchanged at
		 * -1, which avoids a spurious timer interrupt.
		 */
		if (!(estat & CPU_TIMER))
			gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
		return;
	}

	/*
	 * Set the remaining tick value if the timer has not yet expired
	 */
	delta = 0;
	now = ktime_get();
	expire = vcpu->arch.expire;
	if (ktime_before(now, expire))
		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
	else if (cfg & CSR_TCFG_PERIOD) {
		period = cfg & CSR_TCFG_VAL;
		delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
		delta = period - (delta % period);
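
		/*
		 * Worked example (illustrative numbers): with period =
		 * 1000 ticks and 2300 ticks elapsed since the saved
		 * expiry, delta = 1000 - (2300 % 1000) = 700 ticks
		 * remain until the next periodic expiry.
		 */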

		/*
		 * Inject the timer interrupt here even though the soft
		 * timer should already inject it asynchronously: the
		 * soft timer may have been cancelled while the async
		 * injection was still in progress.
		 */
		kvm_queue_irq(vcpu, INT_TI);
	}

	write_gcsr_timertick(delta);
}

/*
 * Save guest timer state and switch to software emulation of guest
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long ticks, delta, cfg;
	ktime_t expire;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);

	/*
	 * From the LoongArch Reference Manual, Volume 1, Chapter 7.6.2:
	 * when a periodic timer fires, CSR.TVAL is reloaded from CSR.TCFG;
	 * when a one-shot timer fires, CSR.TVAL becomes -1. A fired
	 * one-shot timer is therefore detected by TVAL being larger than
	 * TCFG.
	 */
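	/*
	 * Illustration (assumed values): a one-shot timer armed with a
	 * TCFG value of 0x1000 counts TVAL down towards 0; once fired,
	 * TVAL reads back as -1, i.e. ~0UL, which compares as larger
	 * than any possible TCFG value, so the ticks < cfg test below
	 * fails for a fired one-shot timer.
	 */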
	if (ticks < cfg) {
		delta = tick_to_ns(vcpu, ticks);
		expire = ktime_add_ns(ktime_get(), delta);
		vcpu->arch.expire = expire;

		/*
		 * HRTIMER_MODE_PINNED is preferred, since the vCPU is
		 * likely to run on the same physical CPU next time.
		 */
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
	} else if (vcpu->stat.generic.blocking) {
		/*
		 * Inject a timer interrupt so that halt polling can
		 * detect it and exit. The vCPU is already scheduled out,
		 * sleeping on the rcuwait queue, and will not poll for
		 * pending events again, so kvm_queue_irq() alone is not
		 * enough; the hrtimer-based soft timer must be used here.
		 */
		expire = ktime_add_ns(ktime_get(), 10);
		vcpu->arch.expire = expire;
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
	}
}

/*
 * Save the guest timer state and switch to the soft guest timer if the
 * hard timer was in use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	preempt_disable();

	/* Save the hard timer state */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
		_kvm_save_timer(vcpu);

	/* Save timer-related state to the vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	preempt_enable();
}

void kvm_reset_timer(struct kvm_vcpu *vcpu)
{
	write_gcsr_timercfg(0);
	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
	hrtimer_cancel(&vcpu->arch.swtimer);
}