xref: /kvm-unit-tests/x86/apic.c (revision badc98cafea47463b8151eefbc6d4954f6aec6a9)
1 #include "libcflat.h"
2 #include "apic.h"
3 #include "vm.h"
4 #include "smp.h"
5 #include "desc.h"
6 #include "isr.h"
7 #include "msr.h"
8 #include "atomic.h"
9 
/* Highest task-priority value programmable via CR8 (CR8 holds TPR[7:4]). */
#define MAX_TPR			0xf
11 
12 static void test_lapic_existence(void)
13 {
14     u8 version;
15 
16     version = (u8)apic_read(APIC_LVR);
17     printf("apic version: %x\n", version);
18     report("apic existence", version >= 0x10 && version <= 0x15);
19 }
20 
/* Vector used by the TSC deadline timer tests. */
#define TSC_DEADLINE_TIMER_VECTOR 0xef
/* Vector used by the physical-broadcast IPI tests. */
#define BROADCAST_VECTOR 0xcf

/* Number of TSC deadline timer interrupts taken by tsc_deadline_timer_isr(). */
static int tdt_count;
25 
26 static void tsc_deadline_timer_isr(isr_regs_t *regs)
27 {
28     ++tdt_count;
29     eoi();
30 }
31 
32 static void __test_tsc_deadline_timer(void)
33 {
34     handle_irq(TSC_DEADLINE_TIMER_VECTOR, tsc_deadline_timer_isr);
35     irq_enable();
36 
37     wrmsr(MSR_IA32_TSCDEADLINE, rdmsr(MSR_IA32_TSC));
38     asm volatile ("nop");
39     report("tsc deadline timer", tdt_count == 1);
40     report("tsc deadline timer clearing", rdmsr(MSR_IA32_TSCDEADLINE) == 0);
41 }
42 
43 static int enable_tsc_deadline_timer(void)
44 {
45     uint32_t lvtt;
46 
47     if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
48         lvtt = APIC_LVT_TIMER_TSCDEADLINE | TSC_DEADLINE_TIMER_VECTOR;
49         apic_write(APIC_LVTT, lvtt);
50         return 1;
51     } else {
52         return 0;
53     }
54 }
55 
/* Run the TSC deadline timer test, or skip it when unsupported. */
static void test_tsc_deadline_timer(void)
{
    if (!enable_tsc_deadline_timer()) {
        report_skip("tsc deadline timer not detected");
        return;
    }

    __test_tsc_deadline_timer();
}
64 
65 static void do_write_apicbase(void *data)
66 {
67     wrmsr(MSR_IA32_APICBASE, *(u64 *)data);
68 }
69 
70 static bool test_write_apicbase_exception(u64 data)
71 {
72     return test_for_exception(GP_VECTOR, do_write_apicbase, &data);
73 }
74 
/*
 * Exercise legal and illegal IA32_APICBASE EN/EXTD transitions.  EXTD
 * without EN is never valid, and x2APIC mode (EN|EXTD) cannot be left
 * directly for plain xAPIC mode (EN) -- the APIC must first be disabled
 * completely (EN=0, EXTD=0).  Every illegal transition must raise #GP.
 */
static void test_enable_x2apic(void)
{
    u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
    u64 apicbase;

    if (enable_x2apic()) {
        printf("x2apic enabled\n");

        /* Base value with both enable bits cleared. */
        apicbase = orig_apicbase & ~(APIC_EN | APIC_EXTD);

        /* Currently in x2APIC mode: EXTD-only and EN-only are both illegal. */
        report("x2apic enabled to invalid state",
               test_write_apicbase_exception(apicbase | APIC_EXTD));
        report("x2apic enabled to apic enabled",
               test_write_apicbase_exception(apicbase | APIC_EN));

        /* Fully disabling from x2APIC is legal; from disabled, EXTD alone
         * and a direct jump back to x2APIC are both illegal. */
        report("x2apic enabled to disabled state",
               !test_write_apicbase_exception(apicbase | 0));
        report("disabled to invalid state",
               test_write_apicbase_exception(apicbase | APIC_EXTD));
        report("disabled to x2apic enabled",
               test_write_apicbase_exception(apicbase | APIC_EN | APIC_EXTD));

        /* Disabled -> xAPIC is legal; xAPIC -> EXTD-only is not. */
        report("apic disabled to apic enabled",
               !test_write_apicbase_exception(apicbase | APIC_EN));
        report("apic enabled to invalid state",
               test_write_apicbase_exception(apicbase | APIC_EXTD));

        /* Restore whichever mode the test entered with. */
        if (orig_apicbase & APIC_EXTD)
            enable_x2apic();
        else
            reset_apic();

        /*
         * Disabling the APIC resets various APIC registers, restore them to
         * their desired values.
         */
        apic_write(APIC_SPIV, 0x1ff);
    } else {
        printf("x2apic not detected\n");

        /* Enabling an unsupported mode must fault. */
        report("enable unsupported x2apic",
               test_write_apicbase_exception(APIC_EN | APIC_EXTD));
    }
}
118 
/*
 * With the APIC MMIO interface disabled (APIC globally disabled, or the
 * APIC in x2APIC mode), reads of the APIC page must return all ones,
 * writes must be dropped, and CR8 must keep working independently of the
 * MMIO TPR register.
 */
static void verify_disabled_apic_mmio(void)
{
    volatile u32 *lvr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_LVR);
    volatile u32 *tpr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_TASKPRI);
    u32 cr8 = read_cr8();

    /* Scribble over the whole page; the writes should have no effect. */
    memset((void *)APIC_DEFAULT_PHYS_BASE, 0xff, PAGE_SIZE);
    report("*0xfee00030: %x", *lvr == ~0, *lvr);
    report("CR8: %lx", read_cr8() == cr8, read_cr8());
    /* CR8 stays functional but is not reflected through the MMIO TPR. */
    write_cr8(cr8 ^ MAX_TPR);
    report("CR8: %lx", read_cr8() == (cr8 ^ MAX_TPR), read_cr8());
    report("*0xfee00080: %x", *tpr == ~0, *tpr);
    write_cr8(cr8);
}
133 
/*
 * Disable the local APIC and verify the architectural side effects
 * (CPUID.1H:EDX.APIC cleared, MMIO page reads ~0), then re-enable it in
 * xAPIC mode and, when available, x2APIC mode, checking state each step.
 */
static void test_apic_disable(void)
{
    volatile u32 *lvr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_LVR);
    volatile u32 *tpr = (volatile u32 *)(APIC_DEFAULT_PHYS_BASE + APIC_TASKPRI);
    u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
    u32 apic_version = apic_read(APIC_LVR);
    u32 cr8 = read_cr8();

    report_prefix_push("apic_disable");
    assert_msg(orig_apicbase & APIC_EN, "APIC not enabled.");

    disable_apic();
    report("Local apic disabled", !(rdmsr(MSR_IA32_APICBASE) & APIC_EN));
    report("CPUID.1H:EDX.APIC[bit 9] is clear", !this_cpu_has(X86_FEATURE_APIC));
    verify_disabled_apic_mmio();

    reset_apic();
    report("Local apic enabled in xAPIC mode",
	   (rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) == APIC_EN);
    report("CPUID.1H:EDX.APIC[bit 9] is set", this_cpu_has(X86_FEATURE_APIC));
    /* MMIO is live again: the version register must be readable. */
    report("*0xfee00030: %x", *lvr == apic_version, *lvr);
    report("*0xfee00080: %x", *tpr == cr8, *tpr);
    /* The MMIO TPR mirrors CR8 shifted left by 4 (CR8 = TPR[7:4]). */
    write_cr8(cr8 ^ MAX_TPR);
    report("*0xfee00080: %x", *tpr == (cr8 ^ MAX_TPR) << 4, *tpr);
    write_cr8(cr8);

    if (enable_x2apic()) {
	/* reset_apic() cleared SPIV; restore software-enable + vector. */
	apic_write(APIC_SPIV, 0x1ff);
	report("Local apic enabled in x2APIC mode",
	   (rdmsr(MSR_IA32_APICBASE) & (APIC_EN | APIC_EXTD)) ==
	   (APIC_EN | APIC_EXTD));
	report("CPUID.1H:EDX.APIC[bit 9] is set", this_cpu_has(X86_FEATURE_APIC));
	/* In x2APIC mode the MMIO interface must be disabled again. */
	verify_disabled_apic_mmio();
	if (!(orig_apicbase & APIC_EXTD))
	    reset_apic();
    }
    report_prefix_pop();
}
172 
/* Out-of-the-way physical address used to test APIC base relocation. */
#define ALTERNATE_APIC_BASE	0xfed40000
174 
/*
 * Relocate the xAPIC MMIO base via IA32_APICBASE and verify that writes
 * setting reserved bits raise #GP.  Restores the original base at the end.
 */
static void test_apicbase(void)
{
    u64 orig_apicbase = rdmsr(MSR_IA32_APICBASE);
    u32 lvr = apic_read(APIC_LVR);
    u64 value;

    /* Disable the APIC, then re-enable it at the alternate base address. */
    wrmsr(MSR_IA32_APICBASE, orig_apicbase & ~(APIC_EN | APIC_EXTD));
    wrmsr(MSR_IA32_APICBASE, ALTERNATE_APIC_BASE | APIC_BSP | APIC_EN);

    report_prefix_push("apicbase");

    /* The version register must now be readable at the new base. */
    report("relocate apic",
           *(volatile u32 *)(ALTERNATE_APIC_BASE + APIC_LVR) == lvr);

    /* Bits at and above MAXPHYADDR are reserved. */
    value = orig_apicbase | (1UL << cpuid_maxphyaddr());
    report("reserved physaddr bits",
           test_for_exception(GP_VECTOR, do_write_apicbase, &value));

    /* Low bits below the base-address field are reserved as well. */
    value = orig_apicbase | 1;
    report("reserved low bits",
           test_for_exception(GP_VECTOR, do_write_apicbase, &value));

    /* Restore the original base and re-enable the APIC via SPIV. */
    wrmsr(MSR_IA32_APICBASE, orig_apicbase);
    apic_write(APIC_SPIV, 0x1ff);

    report_prefix_pop();
}
202 
203 static void do_write_apic_id(void *id)
204 {
205     apic_write(APIC_ID, *(u32 *)id);
206 }
207 
/*
 * Runs on CPU 1: verify the xAPIC ID matches CPUID leaf 1, that the
 * xAPIC ID is writeable, that the x2APIC ID is read-only and consistent
 * with CPUID leaf 0xb, and that a reset restores the initial ID.
 */
static void __test_apic_id(void * unused)
{
    u32 id, newid;
    u8  initial_xapic_id = cpuid(1).b >> 24;
    u32 initial_x2apic_id = cpuid(0xb).d;
    bool x2apic_mode = rdmsr(MSR_IA32_APICBASE) & APIC_EXTD;

    /* Start the checks from xAPIC mode; the mode is restored at the end. */
    if (x2apic_mode)
        reset_apic();

    id = apic_id();
    report("xapic id matches cpuid", initial_xapic_id == id);

    /* In xAPIC mode the ID lives in bits 31:24 of the APIC_ID register.
     * Both outcomes are accepted: the write may take effect or be ignored,
     * but it must not fault. */
    newid = (id + 1) << 24;
    report("writeable xapic id",
            !test_for_exception(GP_VECTOR, do_write_apic_id, &newid) &&
	    (id == apic_id() || id + 1 == apic_id()));

    if (!enable_x2apic())
        goto out;

    /* In x2APIC mode the ID register is read-only and must #GP on write. */
    report("non-writeable x2apic id",
            test_for_exception(GP_VECTOR, do_write_apic_id, &newid));
    report("sane x2apic id", initial_xapic_id == (apic_id() & 0xff));

    /* old QEMUs do not set initial x2APIC ID */
    report("x2apic id matches cpuid",
           initial_xapic_id == (initial_x2apic_id & 0xff) &&
           initial_x2apic_id == apic_id());

out:
    reset_apic();

    report("correct xapic id after reset", initial_xapic_id == apic_id());

    /* old KVMs do not reset xAPIC ID */
    if (id != apic_id())
        apic_write(APIC_ID, id << 24);

    if (x2apic_mode)
        enable_x2apic();
}
250 
251 static void test_apic_id(void)
252 {
253     if (cpu_count() < 2)
254         return;
255 
256     on_cpu(1, __test_apic_id, NULL);
257 }
258 
/* Number of self-IPI deliveries observed by self_ipi_isr(). */
static int ipi_count;
260 
261 static void self_ipi_isr(isr_regs_t *regs)
262 {
263     ++ipi_count;
264     eoi();
265 }
266 
267 static void test_self_ipi(void)
268 {
269     u64 start = rdtsc();
270     int vec = 0xf1;
271 
272     handle_irq(vec, self_ipi_isr);
273     irq_enable();
274     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | vec,
275                    id_map[0]);
276 
277     do {
278         pause();
279     } while (rdtsc() - start < 1000000000 && ipi_count == 0);
280 
281     report("self ipi", ipi_count == 1);
282 }
283 
/*
 * Shared state for the STI/NMI test: the per-CPU NMI tally (private), its
 * copy published by sti_nop() (nmi_counter), the count of NMIs taken with
 * RIP at post_sti, and the run flag for CPU 1's sti_loop().
 */
volatile int nmi_counter_private, nmi_counter, nmi_hlt_counter, sti_loop_active;
285 
/*
 * Briefly open an interrupt window: STI enables interrupts only after the
 * next instruction completes, so the instruction at the global post_sti
 * label is the first one that can take an interrupt.  The window is closed
 * again with CLI, and the per-CPU NMI count is published afterwards for
 * the driving CPU to observe.
 */
static void sti_nop(char *p)
{
    asm volatile (
		  ".globl post_sti \n\t"
		  "sti \n"
		  /*
		   * vmx won't exit on external interrupt if blocked-by-sti,
		   * so give it a reason to exit by accessing an unmapped page.
		   */
		  "post_sti: testb $0, %0 \n\t"
		  "nop \n\t"
		  "cli"
		  : : "m"(*p)
		  );
    nmi_counter = nmi_counter_private;
}
302 
303 static void sti_loop(void *ignore)
304 {
305     unsigned k = 0;
306 
307     while (sti_loop_active) {
308 	sti_nop((char *)(ulong)((k++ * 4096) % (128 * 1024 * 1024)));
309     }
310 }
311 
/* NMI handler for the STI/NMI test. */
static void nmi_handler(isr_regs_t *regs)
{
    extern void post_sti(void);
    /* Per-CPU tally; sti_nop() publishes it to nmi_counter later. */
    ++nmi_counter_private;
    /*
     * Record when the NMI was delivered with RIP at post_sti, i.e. inside
     * the one-instruction STI shadow -- test_sti_nmi() reports any such
     * occurrence as a failure.
     */
    nmi_hlt_counter += regs->rip == (ulong)post_sti;
}
318 
319 static void update_cr3(void *cr3)
320 {
321     write_cr3((ulong)cr3);
322 }
323 
/*
 * Hammer CPU 1 with NMIs while it loops on sti_nop(), and verify that no
 * NMI is ever delivered with RIP at the post_sti label (nmi_hlt_counter
 * stays zero).  Requires at least two CPUs.
 */
static void test_sti_nmi(void)
{
    unsigned old_counter;

    if (cpu_count() < 2) {
	return;
    }

    handle_irq(2, nmi_handler);
    /* CPU 1 touches a sliding window of pages; give it this page table. */
    on_cpu(1, update_cr3, (void *)read_cr3());

    sti_loop_active = 1;
    on_cpu_async(1, sti_loop, 0);
    /* Send NMIs one at a time until 30000 have been observed on CPU 1. */
    while (nmi_counter < 30000) {
	old_counter = nmi_counter;
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[1]);
	while (nmi_counter == old_counter) {
	    ;
	}
    }
    sti_loop_active = 0;
    report("nmi-after-sti", nmi_hlt_counter == 0);
}
347 
/* Handshake and bookkeeping state shared by the multiple/pending-NMI tests:
 * ctr1/ctr2 pairs implement a two-phase lockstep between CPU 0 and CPU 1. */
static volatile bool nmi_done, nmi_flushed;
static volatile int nmi_received;
static volatile int cpu0_nmi_ctr1, cpu1_nmi_ctr1;
static volatile int cpu0_nmi_ctr2, cpu1_nmi_ctr2;
352 
353 static void multiple_nmi_handler(isr_regs_t *regs)
354 {
355     ++nmi_received;
356 }
357 
/*
 * Runs on CPU 1: lockstep partner for test_multiple_nmi().  Each round it
 * waits for CPU 0 via the ctr1 handshake, sends CPU 0 an NMI followed by
 * a fixed IPI (vector 0x44) used to confirm the NMI's arrival, then waits
 * on the ctr2 handshake before starting the next round.  nmi_done ends
 * the loop.
 */
static void kick_me_nmi(void *blah)
{
    while (!nmi_done) {
	++cpu1_nmi_ctr1;
	while (cpu1_nmi_ctr1 != cpu0_nmi_ctr1 && !nmi_done) {
	    pause();
	}
	if (nmi_done) {
	    return;
	}
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
	/* make sure the NMI has arrived by sending an IPI after it */
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT
		       | 0x44, id_map[0]);
	++cpu1_nmi_ctr2;
	while (cpu1_nmi_ctr2 != cpu0_nmi_ctr2 && !nmi_done) {
	    pause();
	}
    }
}
378 
379 static void flush_nmi(isr_regs_t *regs)
380 {
381     nmi_flushed = true;
382     apic_write(APIC_EOI, 0);
383 }
384 
/*
 * Check NMI delivery under load: each round CPU 0 sends itself one NMI
 * while CPU 1 (kick_me_nmi) sends it another, and exactly two NMIs must
 * be observed per round.  CPU 1's flush_nmi IPI guarantees its NMI has
 * landed before the count is checked.  Requires at least two CPUs.
 */
static void test_multiple_nmi(void)
{
    int i;
    bool ok = true;

    if (cpu_count() < 2) {
	return;
    }

    sti();
    handle_irq(2, multiple_nmi_handler);
    handle_irq(0x44, flush_nmi);
    on_cpu_async(1, kick_me_nmi, 0);
    for (i = 0; i < 1000000; ++i) {
	nmi_flushed = false;
	nmi_received = 0;
	/* Phase 1: tell CPU 1 this round has started and wait for its ack. */
	++cpu0_nmi_ctr1;
	while (cpu1_nmi_ctr1 != cpu0_nmi_ctr1) {
	    pause();
	}
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
	/* flush_nmi sets nmi_flushed once CPU 1's NMI + IPI have arrived. */
	while (!nmi_flushed) {
	    pause();
	}
	if (nmi_received != 2) {
	    ok = false;
	    break;
	}
	/* Phase 2: release CPU 1 for the next round. */
	++cpu0_nmi_ctr2;
	while (cpu1_nmi_ctr2 != cpu0_nmi_ctr2) {
	    pause();
	}
    }
    nmi_done = true;
    report("multiple nmi", ok);
}
421 
422 static void pending_nmi_handler(isr_regs_t *regs)
423 {
424     int i;
425 
426     if (++nmi_received == 1) {
427         for (i = 0; i < 10; ++i)
428             apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
429     }
430 }
431 
432 static void test_pending_nmi(void)
433 {
434     int i;
435 
436     handle_irq(2, pending_nmi_handler);
437     for (i = 0; i < 100000; ++i) {
438 	    nmi_received = 0;
439 
440         apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
441         while (nmi_received < 2)
442             pause();
443 
444         if (nmi_received != 2)
445             break;
446     }
447     report("pending nmi", nmi_received == 2);
448 }
449 
/* Number of LVT timer interrupts observed by lvtt_handler(). */
static volatile int lvtt_counter = 0;
451 
452 static void lvtt_handler(isr_regs_t *regs)
453 {
454     lvtt_counter++;
455     eoi();
456 }
457 
/*
 * Program the LAPIC timer in one-shot mode with divider 1 and verify that
 * exactly one interrupt arrives, no sooner than `interval` time units
 * after arming the timer.
 */
static void test_apic_timer_one_shot(void)
{
    uint64_t tsc1, tsc2;
    static const uint32_t interval = 0x10000;

#define APIC_LVT_TIMER_VECTOR    (0xee)

    handle_irq(APIC_LVT_TIMER_VECTOR, lvtt_handler);
    irq_enable();

    /* One shot mode */
    apic_write(APIC_LVTT, APIC_LVT_TIMER_ONESHOT |
               APIC_LVT_TIMER_VECTOR);
    /* Divider == 1 */
    apic_write(APIC_TDCR, 0x0000000b);

    tsc1 = rdtsc();
    /* Set "Initial Counter Register", which starts the timer */
    apic_write(APIC_TMICT, interval);
    /* Busy-wait for the ISR; lvtt_counter is volatile. */
    while (!lvtt_counter);
    tsc2 = rdtsc();

    /*
     * For LVT Timer clock, SDM vol 3 10.5.4 says it should be
     * derived from processor's bus clock (IIUC which is the same
     * as TSC), however QEMU seems to be using nanosecond. In all
     * cases, the following should satisfy on all modern
     * processors.
     */
    report("APIC LVT timer one shot", (lvtt_counter == 1) &&
           (tsc2 - tsc1 >= interval));
}
490 
/* Count of CPUs that have acknowledged the current broadcast IPI. */
static atomic_t broadcast_counter;
492 
493 static void broadcast_handler(isr_regs_t *regs)
494 {
495 	atomic_inc(&broadcast_counter);
496 	eoi();
497 }
498 
499 static bool broadcast_received(unsigned ncpus)
500 {
501 	unsigned counter;
502 	u64 start = rdtsc();
503 
504 	do {
505 		counter = atomic_read(&broadcast_counter);
506 		if (counter >= ncpus)
507 			break;
508 		pause();
509 	} while (rdtsc() - start < 1000000000);
510 
511 	atomic_set(&broadcast_counter, 0);
512 
513 	return counter == ncpus;
514 }
515 
/*
 * Send a fixed IPI to the physical broadcast destination (0xff in xAPIC
 * mode, 0xffffffff in x2APIC mode) and then via the all-including
 * shorthand, verifying every CPU receives each one.
 */
static void test_physical_broadcast(void)
{
	unsigned ncpus = cpu_count();
	unsigned long cr3 = read_cr3();
	u32 broadcast_address = enable_x2apic() ? 0xffffffff : 0xff;

	handle_irq(BROADCAST_VECTOR, broadcast_handler);
	/* Make all APs share this page table before interrupting them. */
	for (int c = 1; c < ncpus; c++)
		on_cpu(c, update_cr3, (void *)cr3);

	printf("starting broadcast (%s)\n", enable_x2apic() ? "x2apic" : "xapic");
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT |
			BROADCAST_VECTOR, broadcast_address);
	report("APIC physical broadcast address", broadcast_received(ncpus));

	/* The all-including destination shorthand ignores the dest field. */
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_FIXED | APIC_INT_ASSERT |
			BROADCAST_VECTOR | APIC_DEST_ALLINC, 0);
	report("APIC physical broadcast shorthand", broadcast_received(ncpus));
}
535 
/*
 * Spin until the timer's current count (TMCCT) drops to half of
 * @initial_count; when @stop_when_half is clear, keep spinning until it
 * reaches zero, and with @should_wrap_around additionally wait for it to
 * reload to a non-zero value (periodic mode wrap-around).
 */
static void wait_until_tmcct_common(uint32_t initial_count, bool stop_when_half, bool should_wrap_around)
{
	uint32_t tmcct = apic_read(APIC_TMCCT);

	/* A zero TMCCT means the timer isn't counting; nothing to wait for. */
	if (tmcct) {
		while (tmcct > (initial_count / 2))
			tmcct = apic_read(APIC_TMCCT);

		if ( stop_when_half )
			return;

		/* Wait until the counter reach 0 or wrap-around */
		while ( tmcct <= (initial_count / 2) && tmcct > 0 )
			tmcct = apic_read(APIC_TMCCT);

		/* Wait specifically for wrap around to skip 0 TMCCR if we were asked to */
		while (should_wrap_around && !tmcct)
			tmcct = apic_read(APIC_TMCCT);
	}
}
556 
557 static void wait_until_tmcct_is_zero(uint32_t initial_count, bool stop_when_half)
558 {
559 	return wait_until_tmcct_common(initial_count, stop_when_half, false);
560 }
561 
562 static void wait_until_tmcct_wrap_around(uint32_t initial_count, bool stop_when_half)
563 {
564 	return wait_until_tmcct_common(initial_count, stop_when_half, true);
565 }
566 
567 static inline void apic_change_mode(unsigned long new_mode)
568 {
569 	uint32_t lvtt;
570 
571 	lvtt = apic_read(APIC_LVTT);
572 	apic_write(APIC_LVTT, (lvtt & ~APIC_LVT_TIMER_MASK) | new_mode);
573 }
574 
/*
 * Exercise transitions between one-shot and periodic LAPIC timer modes
 * and verify how TMICT/TMCCT behave across each mode change: the count
 * is not reset by a mode change, but periodic mode reloads it from TMICT
 * when it reaches zero.
 */
static void test_apic_change_mode(void)
{
	uint32_t tmict = 0x999999;

	printf("starting apic change mode\n");

	apic_write(APIC_TMICT, tmict);

	apic_change_mode(APIC_LVT_TIMER_PERIODIC);

	report("TMICT value reset", apic_read(APIC_TMICT) == tmict);

	/* Testing one-shot */
	apic_change_mode(APIC_LVT_TIMER_ONESHOT);
	apic_write(APIC_TMICT, tmict);
	report("TMCCT should have a non-zero value", apic_read(APIC_TMCCT));

	wait_until_tmcct_is_zero(tmict, false);
	report("TMCCT should have reached 0", !apic_read(APIC_TMCCT));

	/*
	 * Write TMICT before changing mode from one-shot to periodic TMCCT should
	 * be reset to TMICT periodicly
	 */
	apic_write(APIC_TMICT, tmict);
	wait_until_tmcct_is_zero(tmict, true);
	apic_change_mode(APIC_LVT_TIMER_PERIODIC);
	report("TMCCT should have a non-zero value", apic_read(APIC_TMCCT));

	/*
	 * After the change of mode, the counter should not be reset and continue
	 * counting down from where it was
	 */
	report("TMCCT should not be reset to TMICT value", apic_read(APIC_TMCCT) < (tmict / 2));
	/*
	 * Specifically wait for timer wrap around and skip 0.
	 * Under KVM lapic there is a possibility that a small amount of consecutive
	 * TMCCR reads return 0 while hrtimer is reset in an async callback
	 */
	wait_until_tmcct_wrap_around(tmict, false);
	report("TMCCT should be reset to the initial-count", apic_read(APIC_TMCCT) > (tmict / 2));

	wait_until_tmcct_is_zero(tmict, true);
	/*
	 * Keep the same TMICT and change timer mode to one-shot
	 * TMCCT should be > 0 and count-down to 0
	 */
	apic_change_mode(APIC_LVT_TIMER_ONESHOT);
	report("TMCCT should not be reset to init", apic_read(APIC_TMCCT) < (tmict / 2));
	wait_until_tmcct_is_zero(tmict, false);
	report("TMCCT should have reach zero", !apic_read(APIC_TMCCT));

	/* now tmcct == 0 and tmict != 0 */
	apic_change_mode(APIC_LVT_TIMER_PERIODIC);
	report("TMCCT should stay at zero", !apic_read(APIC_TMCCT));
}
631 
/* KVM paravirtual hypercall number for sending IPIs. */
#define KVM_HC_SEND_IPI 10
633 
/*
 * Issue the KVM_HC_SEND_IPI hypercall via VMCALL and check that it
 * returns success (zero in RAX).
 * NOTE(review): the a0..a3 argument meanings (destination bitmap halves,
 * min APIC ID, ICR) follow the KVM PV send-IPI ABI -- confirm against
 * the kernel's hypercall documentation.
 */
static void test_pv_ipi(void)
{
    int ret;
    unsigned long a0 = 0xFFFFFFFF, a1 = 0, a2 = 0xFFFFFFFF, a3 = 0x0;

    asm volatile("vmcall" : "=a"(ret) :"a"(KVM_HC_SEND_IPI), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
    report("PV IPIs testing", !ret);
}
642 
/*
 * Entry point: set up paging and SMP, then run the local-APIC test
 * battery (existence, ID, enable/disable, base relocation, IPIs,
 * NMIs, and timers).  Returns the summary exit status.
 */
int main(void)
{
    setup_vm();
    smp_init();

    test_lapic_existence();

    /* PIC interrupts would interfere with the APIC-delivery tests. */
    mask_pic_interrupts();
    test_apic_id();
    test_apic_disable();
    test_enable_x2apic();
    test_apicbase();

    test_self_ipi();
    test_physical_broadcast();
    test_pv_ipi();

    test_sti_nmi();
    test_multiple_nmi();
    test_pending_nmi();

    test_apic_timer_one_shot();
    test_apic_change_mode();
    test_tsc_deadline_timer();

    return report_summary();
}
670