xref: /kvm-unit-tests/x86/apic.c (revision 03b1e4570f9678c59a1fdcd0428d21681541602e)
1 #include "libcflat.h"
2 #include "apic.h"
3 #include "vm.h"
4 #include "smp.h"
5 #include "desc.h"
6 #include "isr.h"
7 #include "msr.h"
8 #include "atomic.h"
9 #include "fwcfg.h"
10 
11 #define MAX_TPR			0xf
12 
13 static void test_lapic_existence(void)
14 {
15     u8 version;
16 
17     version = (u8)apic_read(APIC_LVR);
18     printf("apic version: %x\n", version);
19     report("apic existence", version >= 0x10 && version <= 0x15);
20 }
21 
22 #define TSC_DEADLINE_TIMER_VECTOR 0xef
23 #define BROADCAST_VECTOR 0xcf
24 
25 static int tdt_count;
26 
27 static void tsc_deadline_timer_isr(isr_regs_t *regs)
28 {
29     ++tdt_count;
30     eoi();
31 }
32 
/*
 * Arm the TSC deadline timer with a deadline equal to the current TSC,
 * i.e. one that has already expired, so the interrupt fires immediately.
 * Verifies both delivery of the interrupt and that the deadline MSR is
 * cleared to 0 once the timer fires.
 */
static void __test_tsc_deadline_timer(void)
{
    handle_irq(TSC_DEADLINE_TIMER_VECTOR, tsc_deadline_timer_isr);
    irq_enable();

    /* Deadline <= current TSC: the timer interrupt becomes pending at once. */
    wrmsr(MSR_IA32_TSCDEADLINE, rdmsr(MSR_IA32_TSC));
    /* Give the pending interrupt an instruction boundary to be delivered. */
    asm volatile ("nop");
    report("tsc deadline timer", tdt_count == 1);
    report("tsc deadline timer clearing", rdmsr(MSR_IA32_TSCDEADLINE) == 0);
}
43 
44 static int enable_tsc_deadline_timer(void)
45 {
46     uint32_t lvtt;
47 
48     if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
49         lvtt = APIC_LVT_TIMER_TSCDEADLINE | TSC_DEADLINE_TIMER_VECTOR;
50         apic_write(APIC_LVTT, lvtt);
51         return 1;
52     } else {
53         return 0;
54     }
55 }
56 
/* Run the TSC deadline timer test, or skip it if the CPU lacks the feature. */
static void test_tsc_deadline_timer(void)
{
    if (!enable_tsc_deadline_timer()) {
        report_skip("tsc deadline timer not detected");
        return;
    }

    __test_tsc_deadline_timer();
}
65 
66 static void do_write_apicbase(void *data)
67 {
68     wrmsr(MSR_IA32_APICBASE, *(u64 *)data);
69 }
70 
/* Returns true iff writing @data to MSR_IA32_APICBASE raises #GP. */
static bool test_write_apicbase_exception(u64 data)
{
    return test_for_exception(GP_VECTOR, do_write_apicbase, &data);
}
75 
/*
 * Exercise MSR_IA32_APICBASE mode transitions.  Per the SDM, the only
 * legal transitions are xAPIC <-> x2APIC, xAPIC <-> disabled; anything
 * else (e.g. disabled -> x2APIC directly, or EXTD set without EN) must
 * raise #GP.  The original APIC mode is restored before returning.
 */
static void test_enable_x2apic(void)
{
    u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
    u64 apicbase;

    if (enable_x2apic()) {
        printf("x2apic enabled\n");

        /* Base value with both mode bits cleared (== hardware-disabled). */
        apicbase = orig_apicbase & ~(APIC_EN | APIC_EXTD);
        report("x2apic enabled to invalid state",
               test_write_apicbase_exception(apicbase | APIC_EXTD));
        report("x2apic enabled to apic enabled",
               test_write_apicbase_exception(apicbase | APIC_EN));

        /* x2APIC -> disabled is legal; this write takes effect. */
        report("x2apic enabled to disabled state",
               !test_write_apicbase_exception(apicbase | 0));
        report("disabled to invalid state",
               test_write_apicbase_exception(apicbase | APIC_EXTD));
        report("disabled to x2apic enabled",
               test_write_apicbase_exception(apicbase | APIC_EN | APIC_EXTD));

        /* disabled -> xAPIC is legal; APIC is enabled again here. */
        report("apic disabled to apic enabled",
               !test_write_apicbase_exception(apicbase | APIC_EN));
        report("apic enabled to invalid state",
               test_write_apicbase_exception(apicbase | APIC_EXTD));

        /* Restore the mode the test started in. */
        if (orig_apicbase & APIC_EXTD)
            enable_x2apic();
        else
            reset_apic();

        /*
         * Disabling the APIC resets various APIC registers, restore them to
         * their desired values.
         */
        apic_write(APIC_SPIV, 0x1ff);
    } else {
        printf("x2apic not detected\n");

        /* Without x2APIC support, setting EXTD must always #GP. */
        report("enable unsupported x2apic",
               test_write_apicbase_exception(APIC_EN | APIC_EXTD));
    }
}
119 
/*
 * With the APIC not in xAPIC mode, its MMIO page must behave as unmapped
 * device memory: reads return all-ones and writes are dropped.  CR8 must
 * keep working, and CR8 updates must not show through the (inaccessible)
 * APIC TPR register.  Caller is expected to have the APIC MMIO range
 * identity-mapped so the accesses themselves don't fault.
 */
static void verify_disabled_apic_mmio(void)
{
    volatile u32 *lvr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_LVR);
    volatile u32 *tpr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_TASKPRI);
    u32 cr8 = read_cr8();

    /* Writes to the whole page must be ignored... */
    memset((void *)APIC_DEFAULT_PHYS_BASE, 0xff, PAGE_SIZE);
    /* ...and reads must return all-ones. */
    report("*0xfee00030: %x", *lvr == ~0, *lvr);
    report("CR8: %lx", read_cr8() == cr8, read_cr8());
    write_cr8(cr8 ^ MAX_TPR);
    report("CR8: %lx", read_cr8() == (cr8 ^ MAX_TPR), read_cr8());
    /* The CR8 change above must not be reflected via MMIO TPR. */
    report("*0xfee00080: %x", *tpr == ~0, *tpr);
    write_cr8(cr8);
}
134 
/*
 * Disable and re-enable the local APIC via MSR_IA32_APICBASE and verify
 * the architectural side effects in each state: the CPUID APIC feature
 * bit, the MMIO window, and the TPR/CR8 aliasing (TPR[7:4] == CR8[3:0]
 * in xAPIC mode).  Restores the original APIC mode on exit.
 */
static void test_apic_disable(void)
{
    volatile u32 *lvr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_LVR);
    volatile u32 *tpr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_TASKPRI);
    u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
    u32 apic_version = apic_read(APIC_LVR);
    u32 cr8 = read_cr8();

    report_prefix_push("apic_disable");
    assert_msg(orig_apicbase & APIC_EN, "APIC not enabled.");

    disable_apic();
    report("Local apic disabled", !(rdmsr(MSR_IA32_APICBASE) & APIC_EN));
    /* Hardware-disabling the APIC must clear the CPUID feature bit. */
    report("CPUID.1H:EDX.APIC[bit 9] is clear", !this_cpu_has(X86_FEATURE_APIC));
    verify_disabled_apic_mmio();

    reset_apic();
    report("Local apic enabled in xAPIC mode",
	   (rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) == APIC_EN);
    report("CPUID.1H:EDX.APIC[bit 9] is set", this_cpu_has(X86_FEATURE_APIC));
    /* MMIO is live again: LVR readable, TPR mirrors CR8 shifted left 4. */
    report("*0xfee00030: %x", *lvr == apic_version, *lvr);
    report("*0xfee00080: %x", *tpr == cr8, *tpr);
    write_cr8(cr8 ^ MAX_TPR);
    report("*0xfee00080: %x", *tpr == (cr8 ^ MAX_TPR) << 4, *tpr);
    write_cr8(cr8);

    if (enable_x2apic()) {
	/* Re-enabling reset SPIV; restore it before testing further. */
	apic_write(APIC_SPIV, 0x1ff);
	report("Local apic enabled in x2APIC mode",
	   (rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) ==
	   (APIC_EN | APIC_EXTD));
	report("CPUID.1H:EDX.APIC[bit 9] is set", this_cpu_has(X86_FEATURE_APIC));
	/* In x2APIC mode the MMIO window must be gone as well. */
	verify_disabled_apic_mmio();
	if (!(orig_apicbase & APIC_EXTD))
	    reset_apic();
    }
    report_prefix_pop();
}
173 
174 #define ALTERNATE_APIC_BASE	0xfed40000
175 
/*
 * Relocate the xAPIC MMIO window to ALTERNATE_APIC_BASE and verify the
 * registers are visible there, then verify that setting reserved bits in
 * MSR_IA32_APICBASE (bits above MAXPHYADDR, and low reserved bits) #GPs.
 * Restores the original APICBASE and SPIV on exit.
 */
static void test_apicbase(void)
{
    u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
    u32 lvr = apic_read(APIC_LVR);
    u64 value;

    /* Must pass through the disabled state to legally rewrite the base. */
    wrmsr(MSR_IA32_APICBASE, orig_apicbase & ~(APIC_EN | APIC_EXTD));
    wrmsr(MSR_IA32_APICBASE, ALTERNATE_APIC_BASE | APIC_BSP | APIC_EN);

    report_prefix_push("apicbase");

    report("relocate apic",
           *(volatile u32 *)(ALTERNATE_APIC_BASE + APIC_LVR) == lvr);

    /* A physical-address bit beyond MAXPHYADDR is reserved. */
    value = orig_apicbase | (1UL << cpuid_maxphyaddr());
    report("reserved physaddr bits",
           test_for_exception(GP_VECTOR, do_write_apicbase, &value));

    value = orig_apicbase | 1;
    report("reserved low bits",
           test_for_exception(GP_VECTOR, do_write_apicbase, &value));

    /* Restore the original base; SPIV was reset by the mode churn. */
    wrmsr(MSR_IA32_APICBASE, orig_apicbase);
    apic_write(APIC_SPIV, 0x1ff);

    report_prefix_pop();
}
203 
204 static void do_write_apic_id(void *id)
205 {
206     apic_write(APIC_ID, *(u32 *)id);
207 }
208 
/*
 * Runs on CPU1 (see test_apic_id).  Verifies APIC ID semantics:
 * the xAPIC ID is writable and matches CPUID.1H:EBX[31:24], while the
 * x2APIC ID is read-only and matches CPUID.0BH:EDX.  The CPU's original
 * APIC mode and xAPIC ID are restored before returning.
 */
static void __test_apic_id(void * unused)
{
    u32 id, newid;
    u8  initial_xapic_id = cpuid(1).b >> 24;
    u32 initial_x2apic_id = cpuid(0xb).d;
    bool x2apic_mode = rdmsr(MSR_IA32_APICBASE) & APIC_EXTD;

    /* Start from xAPIC mode so the legacy ID register is accessible. */
    if (x2apic_mode)
        reset_apic();

    id = apic_id();
    report("xapic id matches cpuid", initial_xapic_id == id);

    /* xAPIC ID lives in bits 31:24 of the APIC_ID register. */
    newid = (id + 1) << 24;
    report("writeable xapic id",
            !test_for_exception(GP_VECTOR, do_write_apic_id, &newid) &&
	    (id == apic_id() || id + 1 == apic_id()));

    if (!enable_x2apic())
        goto out;

    /* In x2APIC mode the ID register is an MSR and writes must #GP. */
    report("non-writeable x2apic id",
            test_for_exception(GP_VECTOR, do_write_apic_id, &newid));
    report("sane x2apic id", initial_xapic_id == (apic_id() & 0xff));

    /* old QEMUs do not set initial x2APIC ID */
    report("x2apic id matches cpuid",
           initial_xapic_id == (initial_x2apic_id & 0xff) &&
           initial_x2apic_id == apic_id());

out:
    /* Switching back to xAPIC should restore the initial xAPIC ID. */
    reset_apic();

    report("correct xapic id after reset", initial_xapic_id == apic_id());

    /* old KVMs do not reset xAPIC ID */
    if (id != apic_id())
        apic_write(APIC_ID, id << 24);

    if (x2apic_mode)
        enable_x2apic();
}
251 
252 static void test_apic_id(void)
253 {
254     if (cpu_count() < 2)
255         return;
256 
257     on_cpu(1, __test_apic_id, NULL);
258 }
259 
260 static int ipi_count;
261 
262 static void self_ipi_isr(isr_regs_t *regs)
263 {
264     ++ipi_count;
265     eoi();
266 }
267 
268 static void test_self_ipi(void)
269 {
270     u64 start = rdtsc();
271     int vec = 0xf1;
272 
273     handle_irq(vec, self_ipi_isr);
274     irq_enable();
275     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | vec,
276                    id_map[0]);
277 
278     do {
279         pause();
280     } while (rdtsc() - start < 1000000000 && ipi_count == 0);
281 
282     report("self ipi", ipi_count == 1);
283 }
284 
285 volatile int nmi_counter_private, nmi_counter, nmi_hlt_counter, sti_loop_active;
286 
/*
 * Execute STI followed by exactly one more instruction, so that an NMI
 * arriving during the STI interrupt-shadow would be delivered at the
 * instruction labeled post_sti.  nmi_handler() compares the interrupted
 * RIP against that label to detect (buggy) NMI delivery inside the
 * shadow.  @p is a probe address that forces a VM exit (see asm comment).
 */
static void sti_nop(char *p)
{
    asm volatile (
		  ".globl post_sti \n\t"
		  "sti \n"
		  /*
		   * vmx won't exit on external interrupt if blocked-by-sti,
		   * so give it a reason to exit by accessing an unmapped page.
		   */
		  "post_sti: testb $0, %0 \n\t"
		  "nop \n\t"
		  "cli"
		  : : "m"(*p)
		  );
    /* Publish the per-iteration count once interrupts are masked again. */
    nmi_counter = nmi_counter_private;
}
303 
304 static void sti_loop(void *ignore)
305 {
306     unsigned k = 0;
307 
308     while (sti_loop_active) {
309 	sti_nop((char *)(ulong)((k++ * 4096) % (128 * 1024 * 1024)));
310     }
311 }
312 
313 static void nmi_handler(isr_regs_t *regs)
314 {
315     extern void post_sti(void);
316     ++nmi_counter_private;
317     nmi_hlt_counter += regs->rip == (ulong)post_sti;
318 }
319 
/* on_cpu() callback: load the given value (cast from the pointer) into CR3. */
static void update_cr3(void *cr3)
{
    write_cr3((ulong)cr3);
}
324 
/*
 * Verify NMIs are never delivered inside an STI interrupt shadow.
 * CPU1 runs sti_loop() opening STI windows while CPU0 floods it with
 * NMI IPIs; nmi_handler() records any NMI that landed at post_sti.
 * Requires at least two CPUs; silently skipped otherwise.
 */
static void test_sti_nmi(void)
{
    unsigned old_counter;

    if (cpu_count() < 2) {
	return;
    }

    handle_irq(2, nmi_handler);
    /* Share CPU0's page tables with CPU1 so sti_loop's probes behave. */
    on_cpu(1, update_cr3, (void *)read_cr3());

    sti_loop_active = 1;
    on_cpu_async(1, sti_loop, 0);
    while (nmi_counter < 30000) {
	old_counter = nmi_counter;
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[1]);
	/* Wait for CPU1 to observe and publish this NMI before the next. */
	while (nmi_counter == old_counter) {
	    ;
	}
    }
    sti_loop_active = 0;
    report("nmi-after-sti", nmi_hlt_counter == 0);
}
348 
349 static volatile bool nmi_done, nmi_flushed;
350 static volatile int nmi_received;
351 static volatile int cpu0_nmi_ctr1, cpu1_nmi_ctr1;
352 static volatile int cpu0_nmi_ctr2, cpu1_nmi_ctr2;
353 
354 static void multiple_nmi_handler(isr_regs_t *regs)
355 {
356     ++nmi_received;
357 }
358 
/*
 * CPU1 side of test_multiple_nmi.  Runs in lock-step with CPU0 via the
 * cpuN_nmi_ctr1/ctr2 counters: each round CPU1 waits for CPU0's go-ahead,
 * sends CPU0 an NMI followed by a 0x44 IPI (the IPI proves the earlier
 * NMI has been delivered), then waits for CPU0 to finish the round.
 */
static void kick_me_nmi(void *blah)
{
    while (!nmi_done) {
	++cpu1_nmi_ctr1;
	/* Wait for CPU0 to start a round (or for the test to end). */
	while (cpu1_nmi_ctr1 != cpu0_nmi_ctr1 && !nmi_done) {
	    pause();
	}
	if (nmi_done) {
	    return;
	}
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
	/* make sure the NMI has arrived by sending an IPI after it */
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT
		       | 0x44, id_map[0]);
	++cpu1_nmi_ctr2;
	/* Wait for CPU0 to acknowledge the end of this round. */
	while (cpu1_nmi_ctr2 != cpu0_nmi_ctr2 && !nmi_done) {
	    pause();
	}
    }
}
379 
/*
 * Handler for the 0x44 "flush" IPI sent after CPU1's NMI: its arrival
 * implies the preceding NMI has been delivered.  The flag is set before
 * the EOI so the waiter on CPU0 cannot miss it.
 */
static void flush_nmi(isr_regs_t *regs)
{
    nmi_flushed = true;
    apic_write(APIC_EOI, 0);
}
385 
/*
 * Stress NMI queueing: each round, CPU1 sends CPU0 an NMI while CPU0
 * simultaneously sends itself one.  One NMI is delivered immediately and
 * the second is held pending until the first handler returns, so exactly
 * two must be observed per round.  Requires two CPUs; skipped otherwise.
 */
static void test_multiple_nmi(void)
{
    int i;
    bool ok = true;

    if (cpu_count() < 2) {
	return;
    }

    /* IRQs must be on so the 0x44 flush IPI can be taken. */
    sti();
    handle_irq(2, multiple_nmi_handler);
    handle_irq(0x44, flush_nmi);
    on_cpu_async(1, kick_me_nmi, 0);
    for (i = 0; i < 1000000; ++i) {
	nmi_flushed = false;
	nmi_received = 0;
	++cpu0_nmi_ctr1;
	/* Wait until CPU1 is ready to inject its NMI for this round. */
	while (cpu1_nmi_ctr1 != cpu0_nmi_ctr1) {
	    pause();
	}
	/* Self-NMI, racing with the NMI CPU1 sends us. */
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
	/* The flush IPI guarantees CPU1's NMI has landed. */
	while (!nmi_flushed) {
	    pause();
	}
	if (nmi_received != 2) {
	    ok = false;
	    break;
	}
	++cpu0_nmi_ctr2;
	while (cpu1_nmi_ctr2 != cpu0_nmi_ctr2) {
	    pause();
	}
    }
    nmi_done = true;
    report("multiple nmi", ok);
}
422 
423 static void pending_nmi_handler(isr_regs_t *regs)
424 {
425     int i;
426 
427     if (++nmi_received == 1) {
428         for (i = 0; i < 10; ++i)
429             apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
430     }
431 }
432 
433 static void test_pending_nmi(void)
434 {
435     int i;
436 
437     handle_irq(2, pending_nmi_handler);
438     for (i = 0; i < 100000; ++i) {
439 	    nmi_received = 0;
440 
441         apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
442         while (nmi_received < 2)
443             pause();
444 
445         if (nmi_received != 2)
446             break;
447     }
448     report("pending nmi", nmi_received == 2);
449 }
450 
451 static volatile int lvtt_counter = 0;
452 
453 static void lvtt_handler(isr_regs_t *regs)
454 {
455     lvtt_counter++;
456     eoi();
457 }
458 
/*
 * Program the LVT timer in one-shot mode with divider 1 and a small
 * initial count, then verify that exactly one interrupt fires and that
 * at least `interval` TSC cycles elapsed before it did.
 */
static void test_apic_timer_one_shot(void)
{
    uint64_t tsc1, tsc2;
    static const uint32_t interval = 0x10000;

#define APIC_LVT_TIMER_VECTOR    (0xee)

    handle_irq(APIC_LVT_TIMER_VECTOR, lvtt_handler);
    irq_enable();

    /* One shot mode */
    apic_write(APIC_LVTT, APIC_LVT_TIMER_ONESHOT |
               APIC_LVT_TIMER_VECTOR);
    /* Divider == 1 */
    apic_write(APIC_TDCR, 0x0000000b);

    tsc1 = rdtsc();
    /* Set "Initial Counter Register", which starts the timer */
    apic_write(APIC_TMICT, interval);
    while (!lvtt_counter);
    tsc2 = rdtsc();

    /*
     * For LVT Timer clock, SDM vol 3 10.5.4 says it should be
     * derived from processor's bus clock (IIUC which is the same
     * as TSC), however QEMU seems to be using nanosecond. In all
     * cases, the following should satisfy on all modern
     * processors.
     */
    report("APIC LVT timer one shot", (lvtt_counter == 1) &&
           (tsc2 - tsc1 >= interval));
}
491 
492 static atomic_t broadcast_counter;
493 
494 static void broadcast_handler(isr_regs_t *regs)
495 {
496 	atomic_inc(&broadcast_counter);
497 	eoi();
498 }
499 
500 static bool broadcast_received(unsigned ncpus)
501 {
502 	unsigned counter;
503 	u64 start = rdtsc();
504 
505 	do {
506 		counter = atomic_read(&broadcast_counter);
507 		if (counter >= ncpus)
508 			break;
509 		pause();
510 	} while (rdtsc() - start < 1000000000);
511 
512 	atomic_set(&broadcast_counter, 0);
513 
514 	return counter == ncpus;
515 }
516 
/*
 * Verify physical-mode broadcast IPIs reach all CPUs, both via the
 * broadcast destination ID (0xff in xAPIC, 0xffffffff in x2APIC) and via
 * the all-including-self destination shorthand.  Note enable_x2apic() is
 * called twice; the second call merely re-reports the (unchanged) mode.
 */
static void test_physical_broadcast(void)
{
	unsigned ncpus = cpu_count();
	unsigned long cr3 = read_cr3();
	u32 broadcast_address = enable_x2apic() ? 0xffffffff : 0xff;

	handle_irq(BROADCAST_VECTOR, broadcast_handler);
	/* Share CPU0's page tables so all CPUs can run the handler. */
	for (int c = 1; c < ncpus; c++)
		on_cpu(c, update_cr3, (void *)cr3);

	printf("starting broadcast (%s)\n", enable_x2apic() ? "x2apic" : "xapic");
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT |
			BROADCAST_VECTOR, broadcast_address);
	report("APIC physical broadcast address", broadcast_received(ncpus));

	/* Same broadcast via the "all including self" shorthand. */
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT |
			BROADCAST_VECTOR | APIC_DEST_ALLINC, 0);
	report("APIC physical broadcast shorthand", broadcast_received(ncpus));
}
536 
537 static void wait_until_tmcct_common(uint32_t initial_count, bool stop_when_half, bool should_wrap_around)
538 {
539 	uint32_t tmcct = apic_read(APIC_TMCCT);
540 
541 	if (tmcct) {
542 		while (tmcct > (initial_count / 2))
543 			tmcct = apic_read(APIC_TMCCT);
544 
545 		if ( stop_when_half )
546 			return;
547 
548 		/* Wait until the counter reach 0 or wrap-around */
549 		while ( tmcct <= (initial_count / 2) && tmcct > 0 )
550 			tmcct = apic_read(APIC_TMCCT);
551 
552 		/* Wait specifically for wrap around to skip 0 TMCCR if we were asked to */
553 		while (should_wrap_around && !tmcct)
554 			tmcct = apic_read(APIC_TMCCT);
555 	}
556 }
557 
558 static void wait_until_tmcct_is_zero(uint32_t initial_count, bool stop_when_half)
559 {
560 	return wait_until_tmcct_common(initial_count, stop_when_half, false);
561 }
562 
563 static void wait_until_tmcct_wrap_around(uint32_t initial_count, bool stop_when_half)
564 {
565 	return wait_until_tmcct_common(initial_count, stop_when_half, true);
566 }
567 
568 static inline void apic_change_mode(unsigned long new_mode)
569 {
570 	uint32_t lvtt;
571 
572 	lvtt = apic_read(APIC_LVTT);
573 	apic_write(APIC_LVTT, (lvtt & ~APIC_LVT_TIMER_MASK) | new_mode);
574 }
575 
/*
 * Verify timer behavior across one-shot <-> periodic mode changes:
 * a mode change alone must not reset TMCCT; periodic mode must reload
 * TMCCT from TMICT on expiry; one-shot mode must stop at 0.
 */
static void test_apic_change_mode(void)
{
	uint32_t tmict = 0x999999;

	printf("starting apic change mode\n");

	apic_write(APIC_TMICT, tmict);

	apic_change_mode(APIC_LVT_TIMER_PERIODIC);

	report("TMICT value reset", apic_read(APIC_TMICT) == tmict);

	/* Testing one-shot */
	apic_change_mode(APIC_LVT_TIMER_ONESHOT);
	apic_write(APIC_TMICT, tmict);
	report("TMCCT should have a non-zero value", apic_read(APIC_TMCCT));

	wait_until_tmcct_is_zero(tmict, false);
	report("TMCCT should have reached 0", !apic_read(APIC_TMCCT));

	/*
	 * Write TMICT before changing mode from one-shot to periodic TMCCT should
	 * be reset to TMICT periodicly
	 */
	apic_write(APIC_TMICT, tmict);
	wait_until_tmcct_is_zero(tmict, true);
	apic_change_mode(APIC_LVT_TIMER_PERIODIC);
	report("TMCCT should have a non-zero value", apic_read(APIC_TMCCT));

	/*
	 * After the change of mode, the counter should not be reset and continue
	 * counting down from where it was
	 */
	report("TMCCT should not be reset to TMICT value", apic_read(APIC_TMCCT) < (tmict / 2));
	/*
	 * Specifically wait for timer wrap around and skip 0.
	 * Under KVM lapic there is a possibility that a small amount of consecutive
	 * TMCCR reads return 0 while hrtimer is reset in an async callback
	 */
	wait_until_tmcct_wrap_around(tmict, false);
	report("TMCCT should be reset to the initial-count", apic_read(APIC_TMCCT) > (tmict / 2));

	wait_until_tmcct_is_zero(tmict, true);
	/*
	 * Keep the same TMICT and change timer mode to one-shot
	 * TMCCT should be > 0 and count-down to 0
	 */
	apic_change_mode(APIC_LVT_TIMER_ONESHOT);
	report("TMCCT should not be reset to init", apic_read(APIC_TMCCT) < (tmict / 2));
	wait_until_tmcct_is_zero(tmict, false);
	report("TMCCT should have reach zero", !apic_read(APIC_TMCCT));

	/* now tmcct == 0 and tmict != 0 */
	apic_change_mode(APIC_LVT_TIMER_PERIODIC);
	report("TMCCT should stay at zero", !apic_read(APIC_TMCCT));
}
632 
633 #define KVM_HC_SEND_IPI 10
634 
/*
 * Issue the KVM_HC_SEND_IPI paravirt hypercall via VMCALL and check it
 * returns success (0).
 * NOTE(review): a0/a1 appear to form the destination APIC-ID bitmap with
 * a2 as an offset/min-ID argument per the KVM hypercall ABI — verify the
 * exact register meanings against the KVM documentation.
 */
static void test_pv_ipi(void)
{
    int ret;
    unsigned long a0 = 0xFFFFFFFF, a1 = 0, a2 = 0xFFFFFFFF, a3 = 0x0;

    asm volatile("vmcall" : "=a"(ret) :"a"(KVM_HC_SEND_IPI), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
    report("PV IPIs testing", !ret);
}
643 
/*
 * Test entry point.  Order matters: the disable/enable and apicbase
 * tests churn APIC state and restore it before the IPI, NMI and timer
 * tests run.
 */
int main(void)
{
    setup_vm();
    smp_init();

    test_lapic_existence();

    /* Quiesce the PIC so only APIC-delivered interrupts arrive. */
    mask_pic_interrupts();
    test_apic_id();
    test_apic_disable();
    test_enable_x2apic();
    test_apicbase();

    test_self_ipi();
    test_physical_broadcast();
    /* PV IPI hypercall is only meaningful when the test device is present. */
    if (test_device_enabled())
        test_pv_ipi();

    test_sti_nmi();
    test_multiple_nmi();
    test_pending_nmi();

    test_apic_timer_one_shot();
    test_apic_change_mode();
    test_tsc_deadline_timer();

    return report_summary();
}
672