xref: /kvm-unit-tests/x86/apic.c (revision 2c96b77ec9d3b1fcec7525174e23a6240ee05949)
1 #include "libcflat.h"
2 #include "apic.h"
3 #include "vm.h"
4 #include "smp.h"
5 #include "desc.h"
6 #include "isr.h"
7 #include "msr.h"
8 #include "atomic.h"
9 #include "fwcfg.h"
10 
11 #define MAX_TPR			0xf
12 
13 static void test_lapic_existence(void)
14 {
15     u8 version;
16 
17     version = (u8)apic_read(APIC_LVR);
18     printf("apic version: %x\n", version);
19     report(version >= 0x10 && version <= 0x15, "apic existence");
20 }
21 
22 #define TSC_DEADLINE_TIMER_VECTOR 0xef
23 #define BROADCAST_VECTOR 0xcf
24 
25 static int tdt_count;
26 
27 static void tsc_deadline_timer_isr(isr_regs_t *regs)
28 {
29     ++tdt_count;
30     eoi();
31 }
32 
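   /*
    * Arming IA32_TSC_DEADLINE with the current TSC value should fire the
    * timer right away; once it has fired, the MSR must read back as zero.
    */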
33 static void __test_tsc_deadline_timer(void)
34 {
35     handle_irq(TSC_DEADLINE_TIMER_VECTOR, tsc_deadline_timer_isr);
36     irq_enable();
37 
38     wrmsr(MSR_IA32_TSCDEADLINE, rdmsr(MSR_IA32_TSC));
39     asm volatile ("nop");
40     report(tdt_count == 1, "tsc deadline timer");
41     report(rdmsr(MSR_IA32_TSCDEADLINE) == 0, "tsc deadline timer clearing");
42 }
43 
44 static int enable_tsc_deadline_timer(void)
45 {
46     uint32_t lvtt;
47 
48     if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
49         lvtt = APIC_LVT_TIMER_TSCDEADLINE | TSC_DEADLINE_TIMER_VECTOR;
50         apic_write(APIC_LVTT, lvtt);
51         return 1;
52     } else {
53         return 0;
54     }
55 }
56 
57 static void test_tsc_deadline_timer(void)
58 {
59     if (enable_tsc_deadline_timer()) {
60         __test_tsc_deadline_timer();
61     } else {
62         report_skip("tsc deadline timer not detected");
63     }
64 }
65 
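   /*
    * Helpers for test_for_exception(): write an arbitrary value to
    * IA32_APICBASE and report whether the write takes a #GP.
    */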
66 static void do_write_apicbase(void *data)
67 {
68     wrmsr(MSR_IA32_APICBASE, *(u64 *)data);
69 }
70 
71 static bool test_write_apicbase_exception(u64 data)
72 {
73     return test_for_exception(GP_VECTOR, do_write_apicbase, &data);
74 }
75 
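   /*
    * Exercise IA32_APICBASE mode transitions.  Leaving x2APIC mode directly
    * for xAPIC mode, enabling x2APIC from the disabled state, or setting
    * EXTD without EN must all #GP; dropping from x2APIC to disabled and then
    * re-enabling xAPIC is legal.
    */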
76 static void test_enable_x2apic(void)
77 {
78     u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
79     u64 apicbase;
80 
81     if (enable_x2apic()) {
82         printf("x2apic enabled\n");
83 
84         apicbase = orig_apicbase & ~(APIC_EN | APIC_EXTD);
85         report(test_write_apicbase_exception(apicbase | APIC_EXTD),
86                "x2apic enabled to invalid state");
87         report(test_write_apicbase_exception(apicbase | APIC_EN),
88                "x2apic enabled to apic enabled");
89 
90         report(!test_write_apicbase_exception(apicbase | 0),
91                "x2apic enabled to disabled state");
92         report(test_write_apicbase_exception(apicbase | APIC_EXTD),
93                "disabled to invalid state");
94         report(test_write_apicbase_exception(apicbase | APIC_EN | APIC_EXTD),
95                "disabled to x2apic enabled");
96 
97         report(!test_write_apicbase_exception(apicbase | APIC_EN),
98                "apic disabled to apic enabled");
99         report(test_write_apicbase_exception(apicbase | APIC_EXTD),
100                "apic enabled to invalid state");
101 
102         if (orig_apicbase & APIC_EXTD)
103             enable_x2apic();
104         else
105             reset_apic();
106 
107         /*
108          * Disabling the APIC resets various APIC registers; restore them to
109          * their desired values.
110          */
111         apic_write(APIC_SPIV, 0x1ff);
112     } else {
113         printf("x2apic not detected\n");
114 
115         report(test_write_apicbase_exception(APIC_EN | APIC_EXTD),
116                "enable unsupported x2apic");
117     }
118 }
119 
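    /*
     * With the APIC hardware-disabled or in x2APIC mode, the xAPIC MMIO page
     * must no longer be wired to the APIC: reads of LVR/TPR return the
     * all-ones pattern written below instead of live APIC state, and CR8 and
     * the MMIO TPR no longer track each other.
     */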
120 static void verify_disabled_apic_mmio(void)
121 {
122     volatile u32 *lvr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_LVR);
123     volatile u32 *tpr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_TASKPRI);
124     u32 cr8 = read_cr8();
125 
126     memset((void *)APIC_DEFAULT_PHYS_BASE, 0xff, PAGE_SIZE);
127     report(*lvr == ~0, "*0xfee00030: %x", *lvr);
128     report(read_cr8() == cr8, "CR8: %lx", read_cr8());
129     write_cr8(cr8 ^ MAX_TPR);
130     report(read_cr8() == (cr8 ^ MAX_TPR), "CR8: %lx", read_cr8());
131     report(*tpr == ~0, "*0xfee00080: %x", *tpr);
132     write_cr8(cr8);
133 }
134 
135 static void test_apic_disable(void)
136 {
137     volatile u32 *lvr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_LVR);
138     volatile u32 *tpr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_TASKPRI);
139     u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
140     u32 apic_version = apic_read(APIC_LVR);
141     u32 cr8 = read_cr8();
142 
143     report_prefix_push("apic_disable");
144     assert_msg(orig_apicbase & APIC_EN, "APIC not enabled.");
145 
146     disable_apic();
147     report(!(rdmsr(MSR_IA32_APICBASE) & APIC_EN), "Local apic disabled");
148     report(!this_cpu_has(X86_FEATURE_APIC),
149            "CPUID.1H:EDX.APIC[bit 9] is clear");
150     verify_disabled_apic_mmio();
151 
152     reset_apic();
153     report((rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) == APIC_EN,
154            "Local apic enabled in xAPIC mode");
155     report(this_cpu_has(X86_FEATURE_APIC), "CPUID.1H:EDX.APIC[bit 9] is set");
156     report(*lvr == apic_version, "*0xfee00030: %x", *lvr);
157     report(*tpr == cr8, "*0xfee00080: %x", *tpr);
158     write_cr8(cr8 ^ MAX_TPR);
159     report(*tpr == (cr8 ^ MAX_TPR) << 4, "*0xfee00080: %x", *tpr);
160     write_cr8(cr8);
161 
162     if (enable_x2apic()) {
163 	apic_write(APIC_SPIV, 0x1ff);
164 	report((rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) == (APIC_EN | APIC_EXTD),
165                "Local apic enabled in x2APIC mode");
166 	report(this_cpu_has(X86_FEATURE_APIC),
167                "CPUID.1H:EDX.APIC[bit 9] is set");
168 	verify_disabled_apic_mmio();
169 	if (!(orig_apicbase & APIC_EXTD))
170 	    reset_apic();
171     }
172     report_prefix_pop();
173 }
174 
175 #define ALTERNATE_APIC_BASE	0xfed40000
176 
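    /*
     * Relocate the xAPIC MMIO window via IA32_APICBASE and check that setting
     * reserved physical-address bits or reserved low bits #GPs.
     */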
177 static void test_apicbase(void)
178 {
179     u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
180     u32 lvr = apic_read(APIC_LVR);
181     u64 value;
182 
183     wrmsr(MSR_IA32_APICBASE, orig_apicbase & ~(APIC_EN | APIC_EXTD));
184     wrmsr(MSR_IA32_APICBASE, ALTERNATE_APIC_BASE | APIC_BSP | APIC_EN);
185 
186     report_prefix_push("apicbase");
187 
188     report(*(volatile u32 *)(ALTERNATE_APIC_BASE + APIC_LVR) == lvr,
189            "relocate apic");
190 
191     value = orig_apicbase | (1UL << cpuid_maxphyaddr());
192     report(test_for_exception(GP_VECTOR, do_write_apicbase, &value),
193            "reserved physaddr bits");
194 
195     value = orig_apicbase | 1;
196     report(test_for_exception(GP_VECTOR, do_write_apicbase, &value),
197            "reserved low bits");
198 
199     wrmsr(MSR_IA32_APICBASE, orig_apicbase);
200     apic_write(APIC_SPIV, 0x1ff);
201 
202     report_prefix_pop();
203 }
204 
205 static void do_write_apic_id(void *id)
206 {
207     apic_write(APIC_ID, *(u32 *)id);
208 }
209 
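    /*
     * Runs on CPU1: the xAPIC ID must match CPUID, writes to it must not
     * fault in xAPIC mode (the ID may or may not actually change), it must be
     * read-only in x2APIC mode, and it must be restored by an APIC reset.
     */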
210 static void __test_apic_id(void * unused)
211 {
212     u32 id, newid;
213     u8  initial_xapic_id = cpuid(1).b >> 24;
214     u32 initial_x2apic_id = cpuid(0xb).d;
215     bool x2apic_mode = rdmsr(MSR_IA32_APICBASE) & APIC_EXTD;
216 
217     if (x2apic_mode)
218         reset_apic();
219 
220     id = apic_id();
221     report(initial_xapic_id == id, "xapic id matches cpuid");
222 
223     newid = (id + 1) << 24;
224     report(!test_for_exception(GP_VECTOR, do_write_apic_id, &newid) &&
225            (id == apic_id() || id + 1 == apic_id()),
226            "writeable xapic id");
227 
228     if (!enable_x2apic())
229         goto out;
230 
231     report(test_for_exception(GP_VECTOR, do_write_apic_id, &newid),
232            "non-writeable x2apic id");
233     report(initial_xapic_id == (apic_id() & 0xff), "sane x2apic id");
234 
235     /* old QEMUs do not set initial x2APIC ID */
236     report(initial_xapic_id == (initial_x2apic_id & 0xff) &&
237            initial_x2apic_id == apic_id(),
238            "x2apic id matches cpuid");
239 
240 out:
241     reset_apic();
242 
243     report(initial_xapic_id == apic_id(), "correct xapic id after reset");
244 
245     /* old KVMs do not reset xAPIC ID */
246     if (id != apic_id())
247         apic_write(APIC_ID, id << 24);
248 
249     if (x2apic_mode)
250         enable_x2apic();
251 }
252 
253 static void test_apic_id(void)
254 {
255     if (cpu_count() < 2)
256         return;
257 
258     on_cpu(1, __test_apic_id, NULL);
259 }
260 
261 static int ipi_count;
262 
263 static void self_ipi_isr(isr_regs_t *regs)
264 {
265     ++ipi_count;
266     eoi();
267 }
268 
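    /*
     * Send a fixed-vector IPI to self and wait up to ~10^9 TSC cycles for it
     * to be delivered.
     */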
269 static void __test_self_ipi(void)
270 {
271     u64 start = rdtsc();
272     int vec = 0xf1;
273 
274     handle_irq(vec, self_ipi_isr);
275     irq_enable();
276     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | vec,
277                    id_map[0]);
278 
279     do {
280         pause();
281     } while (rdtsc() - start < 1000000000 && ipi_count == 0);
282 }
283 
284 static void test_self_ipi_xapic(void)
285 {
286     u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
287 
288     report_prefix_push("self_ipi_xapic");
289 
290     /* Reset to xAPIC mode. */
291     reset_apic();
292     report((rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) == APIC_EN,
293            "Local apic enabled in xAPIC mode");
294 
295     ipi_count = 0;
296     __test_self_ipi();
297     report(ipi_count == 1, "self ipi");
298 
299     /* Restore x2APIC mode if it was enabled on entry. */
300     if (orig_apicbase & APIC_EXTD)
301         enable_x2apic();
302 
303     report_prefix_pop();
304 }
305 
306 static void test_self_ipi_x2apic(void)
307 {
308     u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
309 
310     report_prefix_push("self_ipi_x2apic");
311 
312     if (enable_x2apic()) {
313         report((rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) ==
314                (APIC_EN | APIC_EXTD),
315                "Local apic enabled in x2APIC mode");
316 
317         ipi_count = 0;
318         __test_self_ipi();
319         report(ipi_count == 1, "self ipi");
320 
321         /* Reset to xAPIC mode unless x2APIC was already enabled. */
322         if (!(orig_apicbase & APIC_EXTD))
323             reset_apic();
324     } else {
325         report_skip("x2apic not detected");
326     }
327 
328     report_prefix_pop();
329 }
330 
331 volatile int nmi_counter_private, nmi_counter, nmi_hlt_counter, sti_loop_active;
332 
333 static void sti_nop(char *p)
334 {
335     asm volatile (
336 		  ".globl post_sti \n\t"
337 		  "sti \n"
338 		  /*
339 		   * vmx won't exit on an external interrupt if blocked-by-sti,
340 		   * so give it a reason to exit by accessing an unmapped page.
341 		   */
342 		  "post_sti: testb $0, %0 \n\t"
343 		  "nop \n\t"
344 		  "cli"
345 		  : : "m"(*p)
346 		  );
347     nmi_counter = nmi_counter_private;
348 }
349 
350 static void sti_loop(void *ignore)
351 {
352     unsigned k = 0;
353 
354     while (sti_loop_active) {
355 	sti_nop((char *)(ulong)((k++ * 4096) % (128 * 1024 * 1024)));
356     }
357 }
358 
359 static void nmi_handler(isr_regs_t *regs)
360 {
361     extern void post_sti(void);
362     ++nmi_counter_private;
363     nmi_hlt_counter += regs->rip == (ulong)post_sti;
364 }
365 
366 static void test_sti_nmi(void)
367 {
368     unsigned old_counter;
369 
370     if (cpu_count() < 2) {
371 	return;
372     }
373 
374     handle_irq(2, nmi_handler);
375     on_cpu(1, update_cr3, (void *)read_cr3());
376 
377     sti_loop_active = 1;
378     on_cpu_async(1, sti_loop, 0);
379     while (nmi_counter < 30000) {
380 	old_counter = nmi_counter;
381 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[1]);
382 	while (nmi_counter == old_counter) {
383 	    ;
384 	}
385     }
386     sti_loop_active = 0;
387     report(nmi_hlt_counter == 0, "nmi-after-sti");
388 }
389 
390 static volatile bool nmi_done, nmi_flushed;
391 static volatile int nmi_received;
392 static volatile int cpu0_nmi_ctr1, cpu1_nmi_ctr1;
393 static volatile int cpu0_nmi_ctr2, cpu1_nmi_ctr2;
394 
395 static void multiple_nmi_handler(isr_regs_t *regs)
396 {
397     ++nmi_received;
398 }
399 
400 static void kick_me_nmi(void *blah)
401 {
402     while (!nmi_done) {
403 	++cpu1_nmi_ctr1;
404 	while (cpu1_nmi_ctr1 != cpu0_nmi_ctr1 && !nmi_done) {
405 	    pause();
406 	}
407 	if (nmi_done) {
408 	    return;
409 	}
410 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
411 	/* make sure the NMI has arrived by sending an IPI after it */
412 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT
413 		       | 0x44, id_map[0]);
414 	++cpu1_nmi_ctr2;
415 	while (cpu1_nmi_ctr2 != cpu0_nmi_ctr2 && !nmi_done) {
416 	    pause();
417 	}
418     }
419 }
420 
421 static void flush_nmi(isr_regs_t *regs)
422 {
423     nmi_flushed = true;
424     apic_write(APIC_EOI, 0);
425 }
426 
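    /*
     * Each iteration CPU1 and CPU0 both send CPU0 an NMI at roughly the same
     * time; the 0x44 IPI CPU1 sends afterwards acts as a barrier, after which
     * exactly two NMIs must have been received (a concurrent NMI may be
     * latched as pending, but must not be lost).
     */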
427 static void test_multiple_nmi(void)
428 {
429     int i;
430     bool ok = true;
431 
432     if (cpu_count() < 2) {
433 	return;
434     }
435 
436     sti();
437     handle_irq(2, multiple_nmi_handler);
438     handle_irq(0x44, flush_nmi);
439     on_cpu_async(1, kick_me_nmi, 0);
440     for (i = 0; i < 100000; ++i) {
441 	nmi_flushed = false;
442 	nmi_received = 0;
443 	++cpu0_nmi_ctr1;
444 	while (cpu1_nmi_ctr1 != cpu0_nmi_ctr1) {
445 	    pause();
446 	}
447 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
448 	while (!nmi_flushed) {
449 	    pause();
450 	}
451 	if (nmi_received != 2) {
452 	    ok = false;
453 	    break;
454 	}
455 	++cpu0_nmi_ctr2;
456 	while (cpu1_nmi_ctr2 != cpu0_nmi_ctr2) {
457 	    pause();
458 	}
459     }
460     nmi_done = true;
461     report(ok, "multiple nmi");
462 }
463 
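    /*
     * From inside the first NMI's handler, queue a burst of additional NMIs
     * to this CPU; only one can be latched as pending, so exactly two NMIs
     * should be delivered per iteration.
     */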
464 static void pending_nmi_handler(isr_regs_t *regs)
465 {
466     int i;
467 
468     if (++nmi_received == 1) {
469         for (i = 0; i < 10; ++i)
470             apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
471     }
472 }
473 
474 static void test_pending_nmi(void)
475 {
476     int i;
477 
478     handle_irq(2, pending_nmi_handler);
479     for (i = 0; i < 100000; ++i) {
480         nmi_received = 0;
481 
482         apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
483         while (nmi_received < 2)
484             pause();
485 
486         if (nmi_received != 2)
487             break;
488     }
489     report(nmi_received == 2, "pending nmi");
490 }
491 
492 static volatile int lvtt_counter = 0;
493 
494 static void lvtt_handler(isr_regs_t *regs)
495 {
496     lvtt_counter++;
497     eoi();
498 }
499 
500 static void test_apic_timer_one_shot(void)
501 {
502     uint64_t tsc1, tsc2;
503     static const uint32_t interval = 0x10000;
504 
505 #define APIC_LVT_TIMER_VECTOR    (0xee)
506 
507     handle_irq(APIC_LVT_TIMER_VECTOR, lvtt_handler);
508     irq_enable();
509 
510     /* One shot mode */
511     apic_write(APIC_LVTT, APIC_LVT_TIMER_ONESHOT |
512                APIC_LVT_TIMER_VECTOR);
513     /* Divider == 1 */
514     apic_write(APIC_TDCR, 0x0000000b);
515 
516     tsc1 = rdtsc();
517     /* Set "Initial Counter Register", which starts the timer */
518     apic_write(APIC_TMICT, interval);
519     while (!lvtt_counter);
520     tsc2 = rdtsc();
521 
522     /*
523      * For the LVT timer clock, SDM Vol. 3 Section 10.5.4 says it should be
524      * derived from the processor's bus clock (IIUC the same as the
525      * TSC), however QEMU seems to use nanoseconds. In either
526      * case, the following should hold on all modern
527      * processors.
528      */
529     report((lvtt_counter == 1) && (tsc2 - tsc1 >= interval),
530            "APIC LVT timer one shot");
531 }
532 
533 static atomic_t broadcast_counter;
534 
535 static void broadcast_handler(isr_regs_t *regs)
536 {
537 	atomic_inc(&broadcast_counter);
538 	eoi();
539 }
540 
541 static bool broadcast_received(unsigned ncpus)
542 {
543 	unsigned counter;
544 	u64 start = rdtsc();
545 
546 	do {
547 		counter = atomic_read(&broadcast_counter);
548 		if (counter >= ncpus)
549 			break;
550 		pause();
551 	} while (rdtsc() - start < 1000000000);
552 
553 	atomic_set(&broadcast_counter, 0);
554 
555 	return counter == ncpus;
556 }
557 
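    /*
     * Send a fixed IPI to the physical broadcast destination (0xff in xAPIC
     * mode, 0xffffffff in x2APIC mode) and via the all-including shorthand,
     * and check that every CPU receives it.
     */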
558 static void test_physical_broadcast(void)
559 {
560 	unsigned ncpus = cpu_count();
561 	unsigned long cr3 = read_cr3();
562 	u32 broadcast_address = enable_x2apic() ? 0xffffffff : 0xff;
563 
564 	handle_irq(BROADCAST_VECTOR, broadcast_handler);
565 	for (int c = 1; c < ncpus; c++)
566 		on_cpu(c, update_cr3, (void *)cr3);
567 
568 	printf("starting broadcast (%s)\n", enable_x2apic() ? "x2apic" : "xapic");
569 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT |
570 			BROADCAST_VECTOR, broadcast_address);
571 	report(broadcast_received(ncpus), "APIC physical broadcast address");
572 
573 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT |
574 			BROADCAST_VECTOR | APIC_DEST_ALLINC, 0);
575 	report(broadcast_received(ncpus), "APIC physical broadcast shorthand");
576 }
577 
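    /*
     * Poll TMCCT until it has dropped to half of initial_count or below;
     * unless stop_when_half, keep polling until it reaches zero, and if
     * should_wrap_around, until it has wrapped back to a non-zero value.
     */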
578 static void wait_until_tmcct_common(uint32_t initial_count, bool stop_when_half, bool should_wrap_around)
579 {
580 	uint32_t tmcct = apic_read(APIC_TMCCT);
581 
582 	if (tmcct) {
583 		while (tmcct > (initial_count / 2))
584 			tmcct = apic_read(APIC_TMCCT);
585 
586 		if (stop_when_half)
587 			return;
588 
589 		/* Wait until the counter reaches 0 or wraps around */
590 		while (tmcct <= (initial_count / 2) && tmcct > 0)
591 			tmcct = apic_read(APIC_TMCCT);
592 
593 		/* If asked to, wait for the wrap-around so a transient 0 TMCCT is skipped */
594 		while (should_wrap_around && !tmcct)
595 			tmcct = apic_read(APIC_TMCCT);
596 	}
597 }
598 
599 static void wait_until_tmcct_is_zero(uint32_t initial_count, bool stop_when_half)
600 {
601 	wait_until_tmcct_common(initial_count, stop_when_half, false);
602 }
603 
604 static void wait_until_tmcct_wrap_around(uint32_t initial_count, bool stop_when_half)
605 {
606 	wait_until_tmcct_common(initial_count, stop_when_half, true);
607 }
608 
609 static inline void apic_change_mode(unsigned long new_mode)
610 {
611 	uint32_t lvtt;
612 
613 	lvtt = apic_read(APIC_LVTT);
614 	apic_write(APIC_LVTT, (lvtt & ~APIC_LVT_TIMER_MASK) | new_mode);
615 }
616 
617 static void test_apic_change_mode(void)
618 {
619 	uint32_t tmict = 0x999999;
620 
621 	printf("starting apic change mode\n");
622 
623 	apic_write(APIC_TMICT, tmict);
624 
625 	apic_change_mode(APIC_LVT_TIMER_PERIODIC);
626 
627 	report(apic_read(APIC_TMICT) == tmict, "TMICT value reset");
628 
629 	/* Testing one-shot */
630 	apic_change_mode(APIC_LVT_TIMER_ONESHOT);
631 	apic_write(APIC_TMICT, tmict);
632 	report(apic_read(APIC_TMCCT), "TMCCT should have a non-zero value");
633 
634 	wait_until_tmcct_is_zero(tmict, false);
635 	report(!apic_read(APIC_TMCCT), "TMCCT should have reached 0");
636 
637 	/*
638 	 * Write TMICT before changing mode from one-shot to periodic; TMCCT should
639 	 * then be reset to TMICT periodically
640 	 */
641 	apic_write(APIC_TMICT, tmict);
642 	wait_until_tmcct_is_zero(tmict, true);
643 	apic_change_mode(APIC_LVT_TIMER_PERIODIC);
644 	report(apic_read(APIC_TMCCT), "TMCCT should have a non-zero value");
645 
646 	/*
647 	 * After the mode change, the counter should not be reset and should
648 	 * continue counting down from where it was
649 	 */
650 	report(apic_read(APIC_TMCCT) < (tmict / 2),
651 	       "TMCCT should not be reset to TMICT value");
652 	/*
653 	 * Specifically wait for the timer to wrap around and skip 0.
654 	 * Under the KVM lapic there is a possibility that a few consecutive
655 	 * TMCCT reads return 0 while the hrtimer is reset in an async callback
656 	 */
657 	wait_until_tmcct_wrap_around(tmict, false);
658 	report(apic_read(APIC_TMCCT) > (tmict / 2),
659 	       "TMCCT should be reset to the initial-count");
660 
661 	wait_until_tmcct_is_zero(tmict, true);
662 	/*
663 	 * Keep the same TMICT and change the timer mode to one-shot;
664 	 * TMCCT should be > 0 and count down to 0
665 	 */
666 	apic_change_mode(APIC_LVT_TIMER_ONESHOT);
667 	report(apic_read(APIC_TMCCT) < (tmict / 2),
668 	       "TMCCT should not be reset to init");
669 	wait_until_tmcct_is_zero(tmict, false);
670 	report(!apic_read(APIC_TMCCT), "TMCCT should have reached zero");
671 
672 	/* now tmcct == 0 and tmict != 0 */
673 	apic_change_mode(APIC_LVT_TIMER_PERIODIC);
674 	report(!apic_read(APIC_TMCCT), "TMCCT should stay at zero");
675 }
676 
677 #define KVM_HC_SEND_IPI 10
678 
679 static void test_pv_ipi(void)
680 {
681     int ret;
682     unsigned long a0 = 0xFFFFFFFF, a1 = 0, a2 = 0xFFFFFFFF, a3 = 0x0;
683 
684     asm volatile("vmcall" : "=a"(ret) :"a"(KVM_HC_SEND_IPI), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
685     report(!ret, "PV IPIs testing");
686 }
687 
688 int main(void)
689 {
690     setup_vm();
691 
692     test_lapic_existence();
693 
694     mask_pic_interrupts();
695     test_apic_id();
696     test_apic_disable();
697     test_enable_x2apic();
698     test_apicbase();
699 
700     test_self_ipi_xapic();
701     test_self_ipi_x2apic();
702     test_physical_broadcast();
703     if (test_device_enabled())
704         test_pv_ipi();
705 
706     test_sti_nmi();
707     test_multiple_nmi();
708     test_pending_nmi();
709 
710     test_apic_timer_one_shot();
711     test_apic_change_mode();
712     test_tsc_deadline_timer();
713 
714     return report_summary();
715 }
716