1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
5 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 #include "opt_bhyve_snapshot.h"
32
33 #include <sys/param.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39
40 #include <machine/vmm.h>
41 #include <machine/vmm_snapshot.h>
42
43 #include <dev/acpica/acpi_hpet.h>
44
45 #include <dev/vmm/vmm_dev.h>
46 #include <dev/vmm/vmm_ktr.h>
47 #include <dev/vmm/vmm_vm.h>
48
49 #include "vmm_lapic.h"
50 #include "vatpic.h"
51 #include "vioapic.h"
52 #include "vhpet.h"
53
54 static MALLOC_DEFINE(M_VHPET, "vhpet", "bhyve virtual hpet");
55
56 #define HPET_FREQ 16777216 /* 16.7 (2^24) Mhz */
57 #define FS_PER_S 1000000000000000ul
58
59 /* Timer N Configuration and Capabilities Register */
60 #define HPET_TCAP_RO_MASK (HPET_TCAP_INT_ROUTE | \
61 HPET_TCAP_FSB_INT_DEL | \
62 HPET_TCAP_SIZE | \
63 HPET_TCAP_PER_INT)
64 /*
65 * HPET requires at least 3 timers and up to 32 timers per block.
66 */
67 #define VHPET_NUM_TIMERS 8
68 CTASSERT(VHPET_NUM_TIMERS >= 3 && VHPET_NUM_TIMERS <= 32);
69
/* Argument handed to each per-timer callout handler (vhpet_handler). */
struct vhpet_callout_arg {
	struct vhpet *vhpet;	/* owning virtual HPET instance */
	int timer_num;		/* index into vhpet->timer[] */
};
74
/*
 * Software state of a virtual HPET block with VHPET_NUM_TIMERS timers.
 * All register state is protected by 'mtx'.
 */
struct vhpet {
	struct vm *vm;			/* VM this HPET belongs to */
	struct mtx mtx;			/* protects all register state */
	sbintime_t freq_sbt;		/* sbintime per HPET tick */

	uint64_t config;		/* Configuration */
	uint64_t isr;			/* Interrupt Status */
	uint32_t countbase;		/* HPET counter base value */
	sbintime_t countbase_sbt;	/* uptime corresponding to base value */

	struct {
		uint64_t cap_config;	/* Configuration */
		uint64_t msireg;	/* FSB interrupt routing */
		uint32_t compval;	/* Comparator */
		uint32_t comprate;	/* periodic mode interval (0 = one-shot) */
		struct callout callout;	/* fires when counter == compval */
		sbintime_t callout_sbt;	/* time when counter==compval */
		struct vhpet_callout_arg arg;	/* handler argument */
	} timer[VHPET_NUM_TIMERS];
};
95
96 #define VHPET_LOCK(vhp) mtx_lock(&((vhp)->mtx))
97 #define VHPET_UNLOCK(vhp) mtx_unlock(&((vhp)->mtx))
98
99 static void vhpet_start_timer(struct vhpet *vhpet, int n, uint32_t counter,
100 sbintime_t now);
101
102 static uint64_t
vhpet_capabilities(void)103 vhpet_capabilities(void)
104 {
105 uint64_t cap = 0;
106
107 cap |= 0x8086 << 16; /* vendor id */
108 cap |= (VHPET_NUM_TIMERS - 1) << 8; /* number of timers */
109 cap |= 1; /* revision */
110 cap &= ~HPET_CAP_COUNT_SIZE; /* 32-bit timer */
111
112 cap &= 0xffffffff;
113 cap |= (FS_PER_S / HPET_FREQ) << 32; /* tick period in fs */
114
115 return (cap);
116 }
117
118 static __inline bool
vhpet_counter_enabled(struct vhpet * vhpet)119 vhpet_counter_enabled(struct vhpet *vhpet)
120 {
121
122 return ((vhpet->config & HPET_CNF_ENABLE) ? true : false);
123 }
124
125 static __inline bool
vhpet_timer_msi_enabled(struct vhpet * vhpet,int n)126 vhpet_timer_msi_enabled(struct vhpet *vhpet, int n)
127 {
128 const uint64_t msi_enable = HPET_TCAP_FSB_INT_DEL | HPET_TCNF_FSB_EN;
129
130 if ((vhpet->timer[n].cap_config & msi_enable) == msi_enable)
131 return (true);
132 else
133 return (false);
134 }
135
136 static __inline int
vhpet_timer_ioapic_pin(struct vhpet * vhpet,int n)137 vhpet_timer_ioapic_pin(struct vhpet *vhpet, int n)
138 {
139 /*
140 * If the timer is configured to use MSI then treat it as if the
141 * timer is not connected to the ioapic.
142 */
143 if (vhpet_timer_msi_enabled(vhpet, n))
144 return (0);
145
146 return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ROUTE) >> 9);
147 }
148
/*
 * Return the current value of the 32-bit main counter.
 *
 * When the counter is enabled the value is extrapolated from
 * 'countbase' using the uptime elapsed since 'countbase_sbt'; if
 * 'nowptr' is non-NULL it receives the sbinuptime() sample used.
 * When the counter is disabled the cached 'countbase' is returned
 * and 'nowptr' must be NULL.
 */
static uint32_t
vhpet_counter(struct vhpet *vhpet, sbintime_t *nowptr)
{
	uint32_t val;
	sbintime_t now, delta;

	val = vhpet->countbase;
	if (vhpet_counter_enabled(vhpet)) {
		now = sbinuptime();
		delta = now - vhpet->countbase_sbt;
		KASSERT(delta >= 0, ("vhpet_counter: uptime went backwards: "
		    "%#lx to %#lx", vhpet->countbase_sbt, now));
		/* Convert the elapsed sbintime into HPET ticks. */
		val += delta / vhpet->freq_sbt;
		if (nowptr != NULL)
			*nowptr = now;
	} else {
		/*
		 * The sbinuptime corresponding to the 'countbase' is
		 * meaningless when the counter is disabled. Make sure
		 * that the caller doesn't want to use it.
		 */
		KASSERT(nowptr == NULL, ("vhpet_counter: nowptr must be NULL"));
	}
	return (val);
}
174
175 static void
vhpet_timer_clear_isr(struct vhpet * vhpet,int n)176 vhpet_timer_clear_isr(struct vhpet *vhpet, int n)
177 {
178 int pin;
179
180 if (vhpet->isr & (1 << n)) {
181 pin = vhpet_timer_ioapic_pin(vhpet, n);
182 KASSERT(pin != 0, ("vhpet timer %d irq incorrectly routed", n));
183 vioapic_deassert_irq(vhpet->vm, pin);
184 vhpet->isr &= ~(1 << n);
185 }
186 }
187
188 static __inline bool
vhpet_periodic_timer(struct vhpet * vhpet,int n)189 vhpet_periodic_timer(struct vhpet *vhpet, int n)
190 {
191
192 return ((vhpet->timer[n].cap_config & HPET_TCNF_TYPE) != 0);
193 }
194
195 static __inline bool
vhpet_timer_interrupt_enabled(struct vhpet * vhpet,int n)196 vhpet_timer_interrupt_enabled(struct vhpet *vhpet, int n)
197 {
198
199 return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ENB) != 0);
200 }
201
202 static __inline bool
vhpet_timer_edge_trig(struct vhpet * vhpet,int n)203 vhpet_timer_edge_trig(struct vhpet *vhpet, int n)
204 {
205
206 KASSERT(!vhpet_timer_msi_enabled(vhpet, n), ("vhpet_timer_edge_trig: "
207 "timer %d is using MSI", n));
208
209 if ((vhpet->timer[n].cap_config & HPET_TCNF_INT_TYPE) == 0)
210 return (true);
211 else
212 return (false);
213 }
214
/*
 * Deliver timer 'n's interrupt to the guest: as an MSI when in FSB
 * mode, otherwise via the ioapic pin the timer is routed to (pulsed
 * for edge-triggered, asserted and latched in 'isr' for
 * level-triggered).  Does nothing when interrupts are disabled for
 * the timer, a level-triggered interrupt is already pending, or the
 * timer has no ioapic routing.
 */
static void
vhpet_timer_interrupt(struct vhpet *vhpet, int n)
{
	int pin;

	/* If interrupts are not enabled for this timer then just return. */
	if (!vhpet_timer_interrupt_enabled(vhpet, n))
		return;

	/*
	 * If a level triggered interrupt is already asserted then just return.
	 */
	if ((vhpet->isr & (1 << n)) != 0) {
		VM_CTR1(vhpet->vm, "hpet t%d intr is already asserted", n);
		return;
	}

	/* FSB mode: address is in the upper 32 bits, data in the lower. */
	if (vhpet_timer_msi_enabled(vhpet, n)) {
		lapic_intr_msi(vhpet->vm, vhpet->timer[n].msireg >> 32,
		    vhpet->timer[n].msireg & 0xffffffff);
		return;
	}

	pin = vhpet_timer_ioapic_pin(vhpet, n);
	if (pin == 0) {
		VM_CTR1(vhpet->vm, "hpet t%d intr is not routed to ioapic", n);
		return;
	}

	if (vhpet_timer_edge_trig(vhpet, n)) {
		vioapic_pulse_irq(vhpet->vm, pin);
	} else {
		/* Latch the assertion so it can be deasserted later. */
		vhpet->isr |= 1 << n;
		vioapic_assert_irq(vhpet->vm, pin);
	}
}
251
/*
 * Advance a periodic timer's comparator to the first value that is
 * 'comprate'-aligned with the old comparator and strictly ahead of
 * 'counter' (all arithmetic is modulo 2^32).
 */
static void
vhpet_adjust_compval(struct vhpet *vhpet, int n, uint32_t counter)
{
	uint32_t compval, comprate, compnext;

	KASSERT(vhpet->timer[n].comprate != 0, ("hpet t%d is not periodic", n));

	compval = vhpet->timer[n].compval;
	comprate = vhpet->timer[n].comprate;

	/*
	 * Calculate the comparator value to be used for the next periodic
	 * interrupt.
	 *
	 * This function is commonly called from the callout handler.
	 * In this scenario the 'counter' is ahead of 'compval'. To find
	 * the next value to program into the accumulator we divide the
	 * number space between 'compval' and 'counter' into 'comprate'
	 * sized units. The 'compval' is rounded up such that is "ahead"
	 * of 'counter'.
	 */
	compnext = compval + ((counter - compval) / comprate + 1) * comprate;

	vhpet->timer[n].compval = compnext;
}
277
/*
 * Callout handler for a timer expiry: rearm the timer for its next
 * comparator match and inject the timer interrupt.
 *
 * The standard callout_pending()/callout_active() checks discard
 * invocations for callouts that were reset or stopped while this
 * handler was waiting on the vhpet lock.
 */
static void
vhpet_handler(void *a)
{
	int n;
	uint32_t counter;
	sbintime_t now;
	struct vhpet *vhpet;
	struct callout *callout;
	struct vhpet_callout_arg *arg;

	arg = a;
	vhpet = arg->vhpet;
	n = arg->timer_num;
	callout = &vhpet->timer[n].callout;

	VM_CTR1(vhpet->vm, "hpet t%d fired", n);

	VHPET_LOCK(vhpet);

	if (callout_pending(callout))	/* callout was reset */
		goto done;

	if (!callout_active(callout))	/* callout was stopped */
		goto done;

	callout_deactivate(callout);

	/* Timers are only armed while the main counter is running. */
	if (!vhpet_counter_enabled(vhpet))
		panic("vhpet(%p) callout with counter disabled", vhpet);

	counter = vhpet_counter(vhpet, &now);
	vhpet_start_timer(vhpet, n, counter, now);
	vhpet_timer_interrupt(vhpet, n);
done:
	VHPET_UNLOCK(vhpet);
	return;
}
315
/*
 * Stop timer 'n's callout.  'now' is the uptime at which the main
 * counter was last read; called with the vhpet lock held.
 */
static void
vhpet_stop_timer(struct vhpet *vhpet, int n, sbintime_t now)
{

	VM_CTR1(vhpet->vm, "hpet t%d stopped", n);
	callout_stop(&vhpet->timer[n].callout);

	/*
	 * If the callout was scheduled to expire in the past but hasn't
	 * had a chance to execute yet then trigger the timer interrupt
	 * here. Failing to do so will result in a missed timer interrupt
	 * in the guest. This is especially bad in one-shot mode because
	 * the next interrupt has to wait for the counter to wrap around.
	 */
	if (vhpet->timer[n].callout_sbt < now) {
		VM_CTR1(vhpet->vm, "hpet t%d interrupt triggered after "
		    "stopping timer", n);
		vhpet_timer_interrupt(vhpet, n);
	}
}
336
/*
 * (Re)arm timer 'n's callout to fire when the main counter reaches
 * the comparator.  'counter' and 'now' are a matching counter/uptime
 * sample.  For a periodic timer the comparator is first advanced so
 * it is ahead of 'counter'.
 */
static void
vhpet_start_timer(struct vhpet *vhpet, int n, uint32_t counter, sbintime_t now)
{
	sbintime_t delta, precision;

	if (vhpet->timer[n].comprate != 0)
		vhpet_adjust_compval(vhpet, n, counter);
	else {
		/*
		 * In one-shot mode it is the guest's responsibility to make
		 * sure that the comparator value is not in the "past". The
		 * hardware doesn't have any belt-and-suspenders to deal with
		 * this so we don't either.
		 */
	}

	/* Ticks until the comparator match, computed modulo 2^32. */
	delta = (vhpet->timer[n].compval - counter) * vhpet->freq_sbt;
	precision = delta >> tc_precexp;
	vhpet->timer[n].callout_sbt = now + delta;
	callout_reset_sbt(&vhpet->timer[n].callout, vhpet->timer[n].callout_sbt,
	    precision, vhpet_handler, &vhpet->timer[n].arg, C_ABSOLUTE);
}
359
360 static void
vhpet_start_counting(struct vhpet * vhpet)361 vhpet_start_counting(struct vhpet *vhpet)
362 {
363 int i;
364
365 vhpet->countbase_sbt = sbinuptime();
366 for (i = 0; i < VHPET_NUM_TIMERS; i++) {
367 /*
368 * Restart the timers based on the value of the main counter
369 * when it stopped counting.
370 */
371 vhpet_start_timer(vhpet, i, vhpet->countbase,
372 vhpet->countbase_sbt);
373 }
374 }
375
376 static void
vhpet_stop_counting(struct vhpet * vhpet,uint32_t counter,sbintime_t now)377 vhpet_stop_counting(struct vhpet *vhpet, uint32_t counter, sbintime_t now)
378 {
379 int i;
380
381 vhpet->countbase = counter;
382 for (i = 0; i < VHPET_NUM_TIMERS; i++)
383 vhpet_stop_timer(vhpet, i, now);
384 }
385
386 static __inline void
update_register(uint64_t * regptr,uint64_t data,uint64_t mask)387 update_register(uint64_t *regptr, uint64_t data, uint64_t mask)
388 {
389
390 *regptr &= ~mask;
391 *regptr |= (data & mask);
392 }
393
/*
 * Handle a guest write to timer 'n's Configuration and Capabilities
 * register.  Read-only capability bits are preserved, the interrupt
 * routing field is validated against the allowed-irq capability mask,
 * and a latched level-triggered interrupt is deasserted if the new
 * configuration would leave it stuck.
 */
static void
vhpet_timer_update_config(struct vhpet *vhpet, int n, uint64_t data,
    uint64_t mask)
{
	bool clear_isr;
	int old_pin, new_pin;
	uint32_t allowed_irqs;
	uint64_t oldval, newval;

	/* Only a level-triggered ioapic timer may have its ISR bit set. */
	if (vhpet_timer_msi_enabled(vhpet, n) ||
	    vhpet_timer_edge_trig(vhpet, n)) {
		if (vhpet->isr & (1 << n))
			panic("vhpet timer %d isr should not be asserted", n);
	}
	old_pin = vhpet_timer_ioapic_pin(vhpet, n);
	oldval = vhpet->timer[n].cap_config;

	newval = oldval;
	update_register(&newval, data, mask);
	/* 32MODE is not supported; read-only bits keep their old values. */
	newval &= ~(HPET_TCAP_RO_MASK | HPET_TCNF_32MODE);
	newval |= oldval & HPET_TCAP_RO_MASK;

	if (newval == oldval)
		return;

	vhpet->timer[n].cap_config = newval;
	VM_CTR2(vhpet->vm, "hpet t%d cap_config set to 0x%016x", n, newval);

	/*
	 * Validate the interrupt routing in the HPET_TCNF_INT_ROUTE field.
	 * If it does not match the bits set in HPET_TCAP_INT_ROUTE then set
	 * it to the default value of 0.
	 */
	allowed_irqs = vhpet->timer[n].cap_config >> 32;
	new_pin = vhpet_timer_ioapic_pin(vhpet, n);
	if (new_pin != 0 && (allowed_irqs & (1 << new_pin)) == 0) {
		VM_CTR3(vhpet->vm, "hpet t%d configured invalid irq %d, "
		    "allowed_irqs 0x%08x", n, new_pin, allowed_irqs);
		new_pin = 0;
		vhpet->timer[n].cap_config &= ~HPET_TCNF_INT_ROUTE;
	}

	/* Leaving periodic mode invalidates the periodic rate. */
	if (!vhpet_periodic_timer(vhpet, n))
		vhpet->timer[n].comprate = 0;

	/*
	 * If the timer's ISR bit is set then clear it in the following cases:
	 * - interrupt is disabled
	 * - interrupt type is changed from level to edge or fsb.
	 * - interrupt routing is changed
	 *
	 * This is to ensure that this timer's level triggered interrupt does
	 * not remain asserted forever.
	 */
	if (vhpet->isr & (1 << n)) {
		KASSERT(old_pin != 0, ("timer %d isr asserted to ioapic pin %d",
		    n, old_pin));
		if (!vhpet_timer_interrupt_enabled(vhpet, n))
			clear_isr = true;
		else if (vhpet_timer_msi_enabled(vhpet, n))
			clear_isr = true;
		else if (vhpet_timer_edge_trig(vhpet, n))
			clear_isr = true;
		else if (vhpet_timer_ioapic_pin(vhpet, n) != old_pin)
			clear_isr = true;
		else
			clear_isr = false;

		if (clear_isr) {
			VM_CTR1(vhpet->vm, "hpet t%d isr cleared due to "
			    "configuration change", n);
			vioapic_deassert_irq(vhpet->vm, old_pin);
			vhpet->isr &= ~(1 << n);
		}
	}
}
470
/*
 * MMIO write handler for the virtual HPET register block.
 *
 * Accepts naturally-aligned 4- or 8-byte accesses; a 4-byte access to
 * the upper half of a 64-bit register is widened into a masked 64-bit
 * update.  Decodes the offset into the global config/ISR/counter
 * registers or a per-timer register and applies the write under the
 * vhpet lock.  Always returns 0; invalid accesses are logged and
 * ignored.
 */
int
vhpet_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t val, int size,
    void *arg)
{
	struct vhpet *vhpet;
	uint64_t data, mask, oldval, val64;
	uint32_t isr_clear_mask, old_compval, old_comprate, counter;
	sbintime_t now, *nowptr;
	int i, offset;

	vhpet = vm_hpet(vcpu_vm(vcpu));
	offset = gpa - VHPET_BASE;

	VHPET_LOCK(vhpet);

	/* Accesses to the HPET should be 4 or 8 bytes wide */
	switch (size) {
	case 8:
		mask = 0xffffffffffffffff;
		data = val;
		break;
	case 4:
		mask = 0xffffffff;
		data = val;
		/* Writes to the upper half shift data/mask up 32 bits. */
		if ((offset & 0x4) != 0) {
			mask <<= 32;
			data <<= 32;
		}
		break;
	default:
		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
		    "offset 0x%08x, size %d", offset, size);
		goto done;
	}

	/* Access to the HPET should be naturally aligned to its width */
	if (offset & (size - 1)) {
		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
		    "offset 0x%08x, size %d", offset, size);
		goto done;
	}

	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
		/*
		 * Get the most recent value of the counter before updating
		 * the 'config' register. If the HPET is going to be disabled
		 * then we need to update 'countbase' with the value right
		 * before it is disabled.
		 */
		nowptr = vhpet_counter_enabled(vhpet) ? &now : NULL;
		counter = vhpet_counter(vhpet, nowptr);
		oldval = vhpet->config;
		update_register(&vhpet->config, data, mask);

		/*
		 * LegacyReplacement Routing is not supported so clear the
		 * bit explicitly.
		 */
		vhpet->config &= ~HPET_CNF_LEG_RT;

		/* Start or stop counting if the enable bit was toggled. */
		if ((oldval ^ vhpet->config) & HPET_CNF_ENABLE) {
			if (vhpet_counter_enabled(vhpet)) {
				vhpet_start_counting(vhpet);
				VM_CTR0(vhpet->vm, "hpet enabled");
			} else {
				vhpet_stop_counting(vhpet, counter, now);
				VM_CTR0(vhpet->vm, "hpet disabled");
			}
		}
		goto done;
	}

	if (offset == HPET_ISR || offset == HPET_ISR + 4) {
		/* Writing 1 to an ISR bit clears it (write-1-to-clear). */
		isr_clear_mask = vhpet->isr & data;
		for (i = 0; i < VHPET_NUM_TIMERS; i++) {
			if ((isr_clear_mask & (1 << i)) != 0) {
				VM_CTR1(vhpet->vm, "hpet t%d isr cleared", i);
				vhpet_timer_clear_isr(vhpet, i);
			}
		}
		goto done;
	}

	if (offset == HPET_MAIN_COUNTER || offset == HPET_MAIN_COUNTER + 4) {
		/* Zero-extend the counter to 64-bits before updating it */
		val64 = vhpet_counter(vhpet, NULL);
		update_register(&val64, data, mask);
		vhpet->countbase = val64;
		if (vhpet_counter_enabled(vhpet))
			vhpet_start_counting(vhpet);
		goto done;
	}

	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
		if (offset == HPET_TIMER_CAP_CNF(i) ||
		    offset == HPET_TIMER_CAP_CNF(i) + 4) {
			vhpet_timer_update_config(vhpet, i, data, mask);
			break;
		}

		if (offset == HPET_TIMER_COMPARATOR(i) ||
		    offset == HPET_TIMER_COMPARATOR(i) + 4) {
			old_compval = vhpet->timer[i].compval;
			old_comprate = vhpet->timer[i].comprate;
			if (vhpet_periodic_timer(vhpet, i)) {
				/*
				 * In periodic mode writes to the comparator
				 * change the 'compval' register only if the
				 * HPET_TCNF_VAL_SET bit is set in the config
				 * register.
				 */
				val64 = vhpet->timer[i].comprate;
				update_register(&val64, data, mask);
				vhpet->timer[i].comprate = val64;
				if ((vhpet->timer[i].cap_config &
				    HPET_TCNF_VAL_SET) != 0) {
					vhpet->timer[i].compval = val64;
				}
			} else {
				KASSERT(vhpet->timer[i].comprate == 0,
				    ("vhpet one-shot timer %d has invalid "
				    "rate %u", i, vhpet->timer[i].comprate));
				val64 = vhpet->timer[i].compval;
				update_register(&val64, data, mask);
				vhpet->timer[i].compval = val64;
			}
			/* VAL_SET is consumed by a single comparator write. */
			vhpet->timer[i].cap_config &= ~HPET_TCNF_VAL_SET;

			/* Rearm the timer if its schedule changed. */
			if (vhpet->timer[i].compval != old_compval ||
			    vhpet->timer[i].comprate != old_comprate) {
				if (vhpet_counter_enabled(vhpet)) {
					counter = vhpet_counter(vhpet, &now);
					vhpet_start_timer(vhpet, i, counter,
					    now);
				}
			}
			break;
		}

		if (offset == HPET_TIMER_FSB_VAL(i) ||
		    offset == HPET_TIMER_FSB_ADDR(i)) {
			update_register(&vhpet->timer[i].msireg, data, mask);
			break;
		}
	}
done:
	VHPET_UNLOCK(vhpet);
	return (0);
}
620
/*
 * MMIO read handler for the virtual HPET register block.
 *
 * Accepts naturally-aligned 4- or 8-byte accesses.  The matching
 * 64-bit register value is fetched under the vhpet lock; for a 4-byte
 * read of the upper half the value is shifted down after unlocking.
 * Unrecognized or invalid accesses return 0.  Always returns 0.
 */
int
vhpet_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
    void *arg)
{
	int i, offset;
	struct vhpet *vhpet;
	uint64_t data;

	vhpet = vm_hpet(vcpu_vm(vcpu));
	offset = gpa - VHPET_BASE;

	VHPET_LOCK(vhpet);

	/* Accesses to the HPET should be 4 or 8 bytes wide */
	if (size != 4 && size != 8) {
		VM_CTR2(vhpet->vm, "hpet invalid mmio read: "
		    "offset 0x%08x, size %d", offset, size);
		data = 0;
		goto done;
	}

	/* Access to the HPET should be naturally aligned to its width */
	if (offset & (size - 1)) {
		VM_CTR2(vhpet->vm, "hpet invalid mmio read: "
		    "offset 0x%08x, size %d", offset, size);
		data = 0;
		goto done;
	}

	if (offset == HPET_CAPABILITIES || offset == HPET_CAPABILITIES + 4) {
		data = vhpet_capabilities();
		goto done;
	}

	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
		data = vhpet->config;
		goto done;
	}

	if (offset == HPET_ISR || offset == HPET_ISR + 4) {
		data = vhpet->isr;
		goto done;
	}

	if (offset == HPET_MAIN_COUNTER || offset == HPET_MAIN_COUNTER + 4) {
		data = vhpet_counter(vhpet, NULL);
		goto done;
	}

	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
		if (offset == HPET_TIMER_CAP_CNF(i) ||
		    offset == HPET_TIMER_CAP_CNF(i) + 4) {
			data = vhpet->timer[i].cap_config;
			break;
		}

		if (offset == HPET_TIMER_COMPARATOR(i) ||
		    offset == HPET_TIMER_COMPARATOR(i) + 4) {
			data = vhpet->timer[i].compval;
			break;
		}

		if (offset == HPET_TIMER_FSB_VAL(i) ||
		    offset == HPET_TIMER_FSB_ADDR(i)) {
			data = vhpet->timer[i].msireg;
			break;
		}
	}

	/* Offset matched no register: reads return 0. */
	if (i >= VHPET_NUM_TIMERS)
		data = 0;
done:
	VHPET_UNLOCK(vhpet);

	/* For a 4-byte read of an upper half return the high 32 bits. */
	if (size == 4) {
		if (offset & 0x4)
			data >>= 32;
	}
	*rval = data;
	return (0);
}
702
/*
 * Allocate and initialize a virtual HPET for 'vm'.
 *
 * Computes the set of ioapic irqs timers may route to (based on the
 * vioapic pin count) and initializes each timer's capability bits,
 * comparator and callout.  Returns the new vhpet; M_WAITOK, so the
 * allocation cannot fail.
 */
struct vhpet *
vhpet_init(struct vm *vm)
{
	int i, pincount;
	struct vhpet *vhpet;
	uint64_t allowed_irqs;
	struct vhpet_callout_arg *arg;
	struct bintime bt;

	vhpet = malloc(sizeof(struct vhpet), M_VHPET, M_WAITOK | M_ZERO);
	vhpet->vm = vm;
	mtx_init(&vhpet->mtx, "vhpet lock", NULL, MTX_DEF);

	/* Precompute the sbintime length of one HPET tick. */
	FREQ2BT(HPET_FREQ, &bt);
	vhpet->freq_sbt = bttosbt(bt);

	pincount = vioapic_pincount(vm);
	if (pincount >= 32)
		allowed_irqs = 0xff000000;	/* irqs 24-31 */
	else if (pincount >= 20)
		allowed_irqs = 0xf << (pincount - 4);	/* 4 upper irqs */
	else
		allowed_irqs = 0;

	/*
	 * Initialize HPET timer hardware state.
	 */
	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
		/* Allowed-irq mask lives in the capability upper 32 bits. */
		vhpet->timer[i].cap_config = allowed_irqs << 32;
		vhpet->timer[i].cap_config |= HPET_TCAP_PER_INT;
		vhpet->timer[i].cap_config |= HPET_TCAP_FSB_INT_DEL;

		vhpet->timer[i].compval = 0xffffffff;
		callout_init(&vhpet->timer[i].callout, 1);

		arg = &vhpet->timer[i].arg;
		arg->vhpet = vhpet;
		arg->timer_num = i;
	}

	return (vhpet);
}
745
746 void
vhpet_cleanup(struct vhpet * vhpet)747 vhpet_cleanup(struct vhpet *vhpet)
748 {
749 int i;
750
751 for (i = 0; i < VHPET_NUM_TIMERS; i++)
752 callout_drain(&vhpet->timer[i].callout);
753
754 mtx_destroy(&vhpet->mtx);
755 free(vhpet, M_VHPET);
756 }
757
758 int
vhpet_getcap(struct vm_hpet_cap * cap)759 vhpet_getcap(struct vm_hpet_cap *cap)
760 {
761
762 cap->capabilities = vhpet_capabilities();
763 return (0);
764 }
765
766 #ifdef BHYVE_SNAPSHOT
/*
 * Save or restore the vhpet register state for a VM snapshot.
 * Returns 0 on success or the error from the first failing
 * SNAPSHOT_VAR_OR_LEAVE operation.
 */
int
vhpet_snapshot(struct vhpet *vhpet, struct vm_snapshot_meta *meta)
{
	int i, ret;
	uint32_t countbase;

	SNAPSHOT_VAR_OR_LEAVE(vhpet->freq_sbt, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vhpet->config, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(vhpet->isr, meta, ret, done);

	/* at restore time the countbase should have the value it had when the
	 * snapshot was created; since the value is not directly kept in
	 * vhpet->countbase, but rather computed relative to the current system
	 * uptime using countbase_sbt, save the value returned by vhpet_counter
	 */
	if (meta->op == VM_SNAPSHOT_SAVE)
		countbase = vhpet_counter(vhpet, NULL);
	SNAPSHOT_VAR_OR_LEAVE(countbase, meta, ret, done);
	if (meta->op == VM_SNAPSHOT_RESTORE)
		vhpet->countbase = countbase;

	/* Per-timer register state; the callout itself is rearmed later
	 * by vhpet_restore_time(). */
	for (i = 0; i < nitems(vhpet->timer); i++) {
		SNAPSHOT_VAR_OR_LEAVE(vhpet->timer[i].cap_config,
		    meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vhpet->timer[i].msireg, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vhpet->timer[i].compval, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vhpet->timer[i].comprate, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vhpet->timer[i].callout_sbt,
		    meta, ret, done);
	}

done:
	return (ret);
}
801
/*
 * After a snapshot restore, rebase the main counter against the
 * current system uptime and rearm the timers.  Always returns 0.
 */
int
vhpet_restore_time(struct vhpet *vhpet)
{
	if (vhpet_counter_enabled(vhpet))
		vhpet_start_counting(vhpet);

	return (0);
}
810 #endif
811