xref: /kvm-unit-tests/x86/vmx.h (revision dcec966ff7423a29dad0e5ffdcf58e8a4095356f)
1 #ifndef X86_VMX_H
2 #define X86_VMX_H
3 
4 #include "libcflat.h"
5 #include "processor.h"
6 #include "bitops.h"
7 #include "util.h"
8 #include "asm/page.h"
9 #include "asm/io.h"
10 
11 void __abort_test(void);
12 
13 #define __TEST_ASSERT(cond)					\
14 do {								\
15 	if (!(cond)) {						\
16 		report_fail("%s:%d: Assertion failed: %s",	\
17 			    __FILE__, __LINE__, #cond);		\
18 		dump_stack();					\
19 		__abort_test();					\
20 	}							\
21 } while (0)
22 
23 #define TEST_ASSERT(cond)					\
24 do {								\
25 	__TEST_ASSERT(cond);					\
26 	report_passed();					\
27 } while (0)
28 
29 #define TEST_ASSERT_MSG(cond, fmt, args...)			\
30 do {								\
31 	if (!(cond)) {						\
32 		report_fail("%s:%d: Assertion failed: %s\n" fmt,\
33 			    __FILE__, __LINE__, #cond, ##args);	\
34 		dump_stack();					\
35 		__abort_test();					\
36 	}							\
37 	report_passed();					\
38 } while (0)
39 
40 #define TEST_ASSERT_EQ(a, b) __TEST_EQ(a, b, #a, #b, 1, __abort_test, "")
41 #define TEST_ASSERT_EQ_MSG(a, b, fmt, args...) \
42 	__TEST_EQ(a, b, #a, #b, 1, __abort_test, fmt, ## args)
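/*
 * Example usage of the assertion helpers above (a minimal sketch; the
 * compared value guest_entry_point is a hypothetical symbol):
 *
 *	u64 rip = vmcs_read(GUEST_RIP);
 *
 *	TEST_ASSERT_MSG(rip, "GUEST_RIP should be non-zero");
 *	TEST_ASSERT_EQ_MSG(guest_entry_point, rip, "unexpected GUEST_RIP");
 *
 * A failed assertion reports the file, line and condition text, dumps the
 * stack and aborts the running test via __abort_test().
 */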
43 
44 struct vmcs_hdr {
45 	u32 revision_id:31;
46 	u32 shadow_vmcs:1;
47 };
48 
49 struct vmcs {
50 	struct vmcs_hdr hdr;
51 	u32 abort; /* VMX-abort indicator */
52 	/* VMCS data */
53 	char data[0];
54 };
55 
56 struct invvpid_operand {
57 	u64 vpid;
58 	u64 gla;
59 };
60 
61 struct regs {
62 	u64 rax;
63 	u64 rcx;
64 	u64 rdx;
65 	u64 rbx;
66 	u64 cr2;
67 	u64 rbp;
68 	u64 rsi;
69 	u64 rdi;
70 	u64 r8;
71 	u64 r9;
72 	u64 r10;
73 	u64 r11;
74 	u64 r12;
75 	u64 r13;
76 	u64 r14;
77 	u64 r15;
78 	u64 rflags;
79 };
80 
81 union exit_reason {
82 	struct {
83 		u32	basic			: 16;
84 		u32	reserved16		: 1;
85 		u32	reserved17		: 1;
86 		u32	reserved18		: 1;
87 		u32	reserved19		: 1;
88 		u32	reserved20		: 1;
89 		u32	reserved21		: 1;
90 		u32	reserved22		: 1;
91 		u32	reserved23		: 1;
92 		u32	reserved24		: 1;
93 		u32	reserved25		: 1;
94 		u32	reserved26		: 1;
95 		u32	enclave_mode		: 1;
96 		u32	smi_pending_mtf		: 1;
97 		u32	smi_from_vmx_root	: 1;
98 		u32	reserved30		: 1;
99 		u32	failed_vmentry		: 1;
100 	};
101 	u32 full;
102 };
103 
104 struct vmentry_result {
105 	/* Instruction mnemonic (for convenience). */
106 	const char *instr;
107 	/* Did the test attempt vmlaunch or vmresume? */
108 	bool vmlaunch;
109 	/* Did the instruction VM-Fail? */
110 	bool vm_fail;
111 	/* Did the VM-Entry fully enter the guest? */
112 	bool entered;
113 	/* VM-Exit reason, valid iff !vm_fail */
114 	union exit_reason exit_reason;
115 	/* Contents of [re]flags after failed entry. */
116 	unsigned long flags;
117 };
118 
119 struct vmx_test {
120 	const char *name;
121 	int (*init)(struct vmcs *vmcs);
122 	void (*guest_main)(void);
123 	int (*exit_handler)(union exit_reason exit_reason);
124 	void (*syscall_handler)(u64 syscall_no);
125 	struct regs guest_regs;
126 	int (*entry_failure_handler)(struct vmentry_result *result);
127 	struct vmcs *vmcs;
128 	int exits;
129 	/* Alternative test interface. */
130 	void (*v2)(void);
131 };
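/*
 * A test is registered by adding a struct vmx_test entry to the test table
 * (see vmx_tests.c).  Minimal sketch with hypothetical handler names:
 *
 *	static void my_guest_main(void)
 *	{
 *		asm volatile("vmcall");
 *	}
 *
 *	static int my_exit_handler(union exit_reason exit_reason)
 *	{
 *		report(exit_reason.basic == VMX_VMCALL, "guest reached vmcall");
 *		return VMX_TEST_VMEXIT;
 *	}
 *
 *	{ .name = "my_test", .guest_main = my_guest_main,
 *	  .exit_handler = my_exit_handler },
 */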
132 
133 union vmx_basic_msr {
134 	u64 val;
135 	struct {
136 		u32 revision;
137 		u32	size:13,
138 			reserved1: 3,
139 			width:1,
140 			dual:1,
141 			type:4,
142 			insouts:1,
143 			ctrl:1,
144 			no_hw_errcode_cc:1,
145 			reserved2:7;
146 	};
147 };
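/*
 * basic_msr (declared further down) caches IA32_VMX_BASIC in this layout.
 * A minimal decoding sketch, assuming the rdmsr() helper and the
 * MSR_IA32_VMX_BASIC constant from the library headers:
 *
 *	union vmx_basic_msr basic;
 *
 *	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
 *	printf("VMCS revision %u, region size %u bytes\n",
 *	       basic.revision, basic.size);
 */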
148 
149 union vmx_ctrl_msr {
150 	u64 val;
151 	struct {
152 		u32 set, clr;
153 	};
154 };
155 
156 union vmx_misc {
157 	u64 val;
158 	struct {
159 		u32 pt_bit:5,
160 		    stores_lma:1,
161 		    act_hlt:1,
162 		    act_shutdown:1,
163 		    act_wfsipi:1,
164 		    :5,
165 		    vmx_pt:1,
166 		    smm_smbase:1,
167 		    cr3_targets:9,
168 		    msr_list_size:3,
169 		    smm_mon_ctl:1,
170 		    vmwrite_any:1,
171 		    inject_len0:1,
172 		    :1;
173 		u32 mseg_revision;
174 	};
175 };
176 
177 union vmx_ept_vpid {
178 	u64 val;
179 	struct {
180 		u32:16,
181 			super:2,
182 			: 2,
183 			invept:1,
184 			: 11;
185 		u32	invvpid:1;
186 	};
187 };
188 
189 enum Encoding {
190 	/* 16-Bit Control Fields */
191 	VPID			= 0x0000ul,
192 	/* Posted-interrupt notification vector */
193 	PINV			= 0x0002ul,
194 	/* EPTP index */
195 	EPTP_IDX		= 0x0004ul,
196 
197 	/* 16-Bit Guest State Fields */
198 	GUEST_SEL_ES		= 0x0800ul,
199 	GUEST_SEL_CS		= 0x0802ul,
200 	GUEST_SEL_SS		= 0x0804ul,
201 	GUEST_SEL_DS		= 0x0806ul,
202 	GUEST_SEL_FS		= 0x0808ul,
203 	GUEST_SEL_GS		= 0x080aul,
204 	GUEST_SEL_LDTR		= 0x080cul,
205 	GUEST_SEL_TR		= 0x080eul,
206 	GUEST_INT_STATUS	= 0x0810ul,
207 	GUEST_PML_INDEX         = 0x0812ul,
208 
209 	/* 16-Bit Host State Fields */
210 	HOST_SEL_ES		= 0x0c00ul,
211 	HOST_SEL_CS		= 0x0c02ul,
212 	HOST_SEL_SS		= 0x0c04ul,
213 	HOST_SEL_DS		= 0x0c06ul,
214 	HOST_SEL_FS		= 0x0c08ul,
215 	HOST_SEL_GS		= 0x0c0aul,
216 	HOST_SEL_TR		= 0x0c0cul,
217 
218 	/* 64-Bit Control Fields */
219 	IO_BITMAP_A		= 0x2000ul,
220 	IO_BITMAP_B		= 0x2002ul,
221 	MSR_BITMAP		= 0x2004ul,
222 	EXIT_MSR_ST_ADDR	= 0x2006ul,
223 	EXIT_MSR_LD_ADDR	= 0x2008ul,
224 	ENTER_MSR_LD_ADDR	= 0x200aul,
225 	VMCS_EXEC_PTR		= 0x200cul,
226 	TSC_OFFSET		= 0x2010ul,
227 	TSC_OFFSET_HI		= 0x2011ul,
228 	APIC_VIRT_ADDR		= 0x2012ul,
229 	APIC_ACCS_ADDR		= 0x2014ul,
230 	POSTED_INTR_DESC_ADDR	= 0x2016ul,
231 	EPTP			= 0x201aul,
232 	EPTP_HI			= 0x201bul,
233 	VMREAD_BITMAP           = 0x2026ul,
234 	VMREAD_BITMAP_HI        = 0x2027ul,
235 	VMWRITE_BITMAP          = 0x2028ul,
236 	VMWRITE_BITMAP_HI       = 0x2029ul,
237 	EOI_EXIT_BITMAP0	= 0x201cul,
238 	EOI_EXIT_BITMAP1	= 0x201eul,
239 	EOI_EXIT_BITMAP2	= 0x2020ul,
240 	EOI_EXIT_BITMAP3	= 0x2022ul,
241 	PMLADDR                 = 0x200eul,
242 	PMLADDR_HI              = 0x200ful,
243 
244 
245 	/* 64-Bit Readonly Data Field */
246 	INFO_PHYS_ADDR		= 0x2400ul,
247 
248 	/* 64-Bit Guest State */
249 	VMCS_LINK_PTR		= 0x2800ul,
250 	VMCS_LINK_PTR_HI	= 0x2801ul,
251 	GUEST_DEBUGCTL		= 0x2802ul,
252 	GUEST_DEBUGCTL_HI	= 0x2803ul,
253 	GUEST_EFER		= 0x2806ul,
254 	GUEST_PAT		= 0x2804ul,
255 	GUEST_PERF_GLOBAL_CTRL	= 0x2808ul,
256 	GUEST_PDPTE		= 0x280aul,
257 	GUEST_BNDCFGS		= 0x2812ul,
258 
259 	/* 64-Bit Host State */
260 	HOST_PAT		= 0x2c00ul,
261 	HOST_EFER		= 0x2c02ul,
262 	HOST_PERF_GLOBAL_CTRL	= 0x2c04ul,
263 
264 	/* 32-Bit Control Fields */
265 	PIN_CONTROLS		= 0x4000ul,
266 	CPU_EXEC_CTRL0		= 0x4002ul,
267 	EXC_BITMAP		= 0x4004ul,
268 	PF_ERROR_MASK		= 0x4006ul,
269 	PF_ERROR_MATCH		= 0x4008ul,
270 	CR3_TARGET_COUNT	= 0x400aul,
271 	EXI_CONTROLS		= 0x400cul,
272 	EXI_MSR_ST_CNT		= 0x400eul,
273 	EXI_MSR_LD_CNT		= 0x4010ul,
274 	ENT_CONTROLS		= 0x4012ul,
275 	ENT_MSR_LD_CNT		= 0x4014ul,
276 	ENT_INTR_INFO		= 0x4016ul,
277 	ENT_INTR_ERROR		= 0x4018ul,
278 	ENT_INST_LEN		= 0x401aul,
279 	TPR_THRESHOLD		= 0x401cul,
280 	CPU_EXEC_CTRL1		= 0x401eul,
281 
282 	/* 32-Bit R/O Data Fields */
283 	VMX_INST_ERROR		= 0x4400ul,
284 	EXI_REASON		= 0x4402ul,
285 	EXI_INTR_INFO		= 0x4404ul,
286 	EXI_INTR_ERROR		= 0x4406ul,
287 	IDT_VECT_INFO		= 0x4408ul,
288 	IDT_VECT_ERROR		= 0x440aul,
289 	EXI_INST_LEN		= 0x440cul,
290 	EXI_INST_INFO		= 0x440eul,
291 
292 	/* 32-Bit Guest State Fields */
293 	GUEST_LIMIT_ES		= 0x4800ul,
294 	GUEST_LIMIT_CS		= 0x4802ul,
295 	GUEST_LIMIT_SS		= 0x4804ul,
296 	GUEST_LIMIT_DS		= 0x4806ul,
297 	GUEST_LIMIT_FS		= 0x4808ul,
298 	GUEST_LIMIT_GS		= 0x480aul,
299 	GUEST_LIMIT_LDTR	= 0x480cul,
300 	GUEST_LIMIT_TR		= 0x480eul,
301 	GUEST_LIMIT_GDTR	= 0x4810ul,
302 	GUEST_LIMIT_IDTR	= 0x4812ul,
303 	GUEST_AR_ES		= 0x4814ul,
304 	GUEST_AR_CS		= 0x4816ul,
305 	GUEST_AR_SS		= 0x4818ul,
306 	GUEST_AR_DS		= 0x481aul,
307 	GUEST_AR_FS		= 0x481cul,
308 	GUEST_AR_GS		= 0x481eul,
309 	GUEST_AR_LDTR		= 0x4820ul,
310 	GUEST_AR_TR		= 0x4822ul,
311 	GUEST_INTR_STATE	= 0x4824ul,
312 	GUEST_ACTV_STATE	= 0x4826ul,
313 	GUEST_SMBASE		= 0x4828ul,
314 	GUEST_SYSENTER_CS	= 0x482aul,
315 	PREEMPT_TIMER_VALUE	= 0x482eul,
316 
317 	/* 32-Bit Host State Fields */
318 	HOST_SYSENTER_CS	= 0x4c00ul,
319 
320 	/* Natural-Width Control Fields */
321 	CR0_MASK		= 0x6000ul,
322 	CR4_MASK		= 0x6002ul,
323 	CR0_READ_SHADOW		= 0x6004ul,
324 	CR4_READ_SHADOW		= 0x6006ul,
325 	CR3_TARGET_0		= 0x6008ul,
326 	CR3_TARGET_1		= 0x600aul,
327 	CR3_TARGET_2		= 0x600cul,
328 	CR3_TARGET_3		= 0x600eul,
329 
330 	/* Natural-Width R/O Data Fields */
331 	EXI_QUALIFICATION	= 0x6400ul,
332 	IO_RCX			= 0x6402ul,
333 	IO_RSI			= 0x6404ul,
334 	IO_RDI			= 0x6406ul,
335 	IO_RIP			= 0x6408ul,
336 	GUEST_LINEAR_ADDRESS	= 0x640aul,
337 
338 	/* Natural-Width Guest State Fields */
339 	GUEST_CR0		= 0x6800ul,
340 	GUEST_CR3		= 0x6802ul,
341 	GUEST_CR4		= 0x6804ul,
342 	GUEST_BASE_ES		= 0x6806ul,
343 	GUEST_BASE_CS		= 0x6808ul,
344 	GUEST_BASE_SS		= 0x680aul,
345 	GUEST_BASE_DS		= 0x680cul,
346 	GUEST_BASE_FS		= 0x680eul,
347 	GUEST_BASE_GS		= 0x6810ul,
348 	GUEST_BASE_LDTR		= 0x6812ul,
349 	GUEST_BASE_TR		= 0x6814ul,
350 	GUEST_BASE_GDTR		= 0x6816ul,
351 	GUEST_BASE_IDTR		= 0x6818ul,
352 	GUEST_DR7		= 0x681aul,
353 	GUEST_RSP		= 0x681cul,
354 	GUEST_RIP		= 0x681eul,
355 	GUEST_RFLAGS		= 0x6820ul,
356 	GUEST_PENDING_DEBUG	= 0x6822ul,
357 	GUEST_SYSENTER_ESP	= 0x6824ul,
358 	GUEST_SYSENTER_EIP	= 0x6826ul,
359 
360 	/* Natural-Width Host State Fields */
361 	HOST_CR0		= 0x6c00ul,
362 	HOST_CR3		= 0x6c02ul,
363 	HOST_CR4		= 0x6c04ul,
364 	HOST_BASE_FS		= 0x6c06ul,
365 	HOST_BASE_GS		= 0x6c08ul,
366 	HOST_BASE_TR		= 0x6c0aul,
367 	HOST_BASE_GDTR		= 0x6c0cul,
368 	HOST_BASE_IDTR		= 0x6c0eul,
369 	HOST_SYSENTER_ESP	= 0x6c10ul,
370 	HOST_SYSENTER_EIP	= 0x6c12ul,
371 	HOST_RSP		= 0x6c14ul,
372 	HOST_RIP		= 0x6c16ul
373 };
374 
375 #define VMX_ENTRY_FAILURE	(1ul << 31)
376 #define VMX_ENTRY_FLAGS		(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
377 				 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)
378 
379 enum Reason {
380 	VMX_EXC_NMI		= 0,
381 	VMX_EXTINT		= 1,
382 	VMX_TRIPLE_FAULT	= 2,
383 	VMX_INIT		= 3,
384 	VMX_SIPI		= 4,
385 	VMX_SMI_IO		= 5,
386 	VMX_SMI_OTHER		= 6,
387 	VMX_INTR_WINDOW		= 7,
388 	VMX_NMI_WINDOW		= 8,
389 	VMX_TASK_SWITCH		= 9,
390 	VMX_CPUID		= 10,
391 	VMX_GETSEC		= 11,
392 	VMX_HLT			= 12,
393 	VMX_INVD		= 13,
394 	VMX_INVLPG		= 14,
395 	VMX_RDPMC		= 15,
396 	VMX_RDTSC		= 16,
397 	VMX_RSM			= 17,
398 	VMX_VMCALL		= 18,
399 	VMX_VMCLEAR		= 19,
400 	VMX_VMLAUNCH		= 20,
401 	VMX_VMPTRLD		= 21,
402 	VMX_VMPTRST		= 22,
403 	VMX_VMREAD		= 23,
404 	VMX_VMRESUME		= 24,
405 	VMX_VMWRITE		= 25,
406 	VMX_VMXOFF		= 26,
407 	VMX_VMXON		= 27,
408 	VMX_CR			= 28,
409 	VMX_DR			= 29,
410 	VMX_IO			= 30,
411 	VMX_RDMSR		= 31,
412 	VMX_WRMSR		= 32,
413 	VMX_FAIL_STATE		= 33,
414 	VMX_FAIL_MSR		= 34,
415 	VMX_MWAIT		= 36,
416 	VMX_MTF			= 37,
417 	VMX_MONITOR		= 39,
418 	VMX_PAUSE		= 40,
419 	VMX_FAIL_MCHECK		= 41,
420 	VMX_TPR_THRESHOLD	= 43,
421 	VMX_APIC_ACCESS		= 44,
422 	VMX_EOI_INDUCED		= 45,
423 	VMX_GDTR_IDTR		= 46,
424 	VMX_LDTR_TR		= 47,
425 	VMX_EPT_VIOLATION	= 48,
426 	VMX_EPT_MISCONFIG	= 49,
427 	VMX_INVEPT		= 50,
428 	VMX_PREEMPT		= 52,
429 	VMX_INVVPID		= 53,
430 	VMX_WBINVD		= 54,
431 	VMX_XSETBV		= 55,
432 	VMX_APIC_WRITE		= 56,
433 	VMX_RDRAND		= 57,
434 	VMX_INVPCID		= 58,
435 	VMX_VMFUNC		= 59,
436 	VMX_RDSEED		= 61,
437 	VMX_PML_FULL		= 62,
438 	VMX_XSAVES		= 63,
439 	VMX_XRSTORS		= 64,
440 };
441 
442 enum Ctrl_exi {
443 	EXI_SAVE_DBGCTLS	= 1UL << 2,
444 	EXI_HOST_64		= 1UL << 9,
445 	EXI_LOAD_PERF		= 1UL << 12,
446 	EXI_INTA		= 1UL << 15,
447 	EXI_SAVE_PAT		= 1UL << 18,
448 	EXI_LOAD_PAT		= 1UL << 19,
449 	EXI_SAVE_EFER		= 1UL << 20,
450 	EXI_LOAD_EFER		= 1UL << 21,
451 	EXI_SAVE_PREEMPT	= 1UL << 22,
452 };
453 
454 enum Ctrl_ent {
455 	ENT_LOAD_DBGCTLS	= 1UL << 2,
456 	ENT_GUEST_64		= 1UL << 9,
457 	ENT_LOAD_PERF		= 1UL << 13,
458 	ENT_LOAD_PAT		= 1UL << 14,
459 	ENT_LOAD_EFER		= 1UL << 15,
460 	ENT_LOAD_BNDCFGS	= 1UL << 16
461 };
462 
463 enum Ctrl_pin {
464 	PIN_EXTINT		= 1ul << 0,
465 	PIN_NMI			= 1ul << 3,
466 	PIN_VIRT_NMI		= 1ul << 5,
467 	PIN_PREEMPT		= 1ul << 6,
468 	PIN_POST_INTR		= 1ul << 7,
469 };
470 
471 enum Ctrl0 {
472 	CPU_INTR_WINDOW		= 1ul << 2,
473 	CPU_USE_TSC_OFFSET	= 1ul << 3,
474 	CPU_HLT			= 1ul << 7,
475 	CPU_INVLPG		= 1ul << 9,
476 	CPU_MWAIT		= 1ul << 10,
477 	CPU_RDPMC		= 1ul << 11,
478 	CPU_RDTSC		= 1ul << 12,
479 	CPU_CR3_LOAD		= 1ul << 15,
480 	CPU_CR3_STORE		= 1ul << 16,
481 	CPU_CR8_LOAD		= 1ul << 19,
482 	CPU_CR8_STORE		= 1ul << 20,
483 	CPU_TPR_SHADOW		= 1ul << 21,
484 	CPU_NMI_WINDOW		= 1ul << 22,
485 	CPU_IO			= 1ul << 24,
486 	CPU_IO_BITMAP		= 1ul << 25,
487 	CPU_MTF			= 1ul << 27,
488 	CPU_MSR_BITMAP		= 1ul << 28,
489 	CPU_MONITOR		= 1ul << 29,
490 	CPU_PAUSE		= 1ul << 30,
491 	CPU_SECONDARY		= 1ul << 31,
492 };
493 
494 enum Ctrl1 {
495 	CPU_VIRT_APIC_ACCESSES	= 1ul << 0,
496 	CPU_EPT			= 1ul << 1,
497 	CPU_DESC_TABLE		= 1ul << 2,
498 	CPU_RDTSCP		= 1ul << 3,
499 	CPU_VIRT_X2APIC		= 1ul << 4,
500 	CPU_VPID		= 1ul << 5,
501 	CPU_WBINVD		= 1ul << 6,
502 	CPU_URG			= 1ul << 7,
503 	CPU_APIC_REG_VIRT	= 1ul << 8,
504 	CPU_VINTD		= 1ul << 9,
505 	CPU_RDRAND		= 1ul << 11,
506 	CPU_SHADOW_VMCS		= 1ul << 14,
507 	CPU_RDSEED		= 1ul << 16,
508 	CPU_PML                 = 1ul << 17,
509 	CPU_USE_TSC_SCALING	= 1ul << 25,
510 };
511 
512 enum Intr_type {
513 	VMX_INTR_TYPE_EXT_INTR = 0,
514 	VMX_INTR_TYPE_NMI_INTR = 2,
515 	VMX_INTR_TYPE_HARD_EXCEPTION = 3,
516 	VMX_INTR_TYPE_SOFT_INTR = 4,
517 	VMX_INTR_TYPE_SOFT_EXCEPTION = 6,
518 };
519 
520 /*
521  * Interruption-information format
522  */
523 #define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
524 #define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
525 #define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
526 #define INTR_INFO_UNBLOCK_NMI_MASK      0x1000          /* 12 */
527 #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
528 
529 #define INTR_INFO_INTR_TYPE_SHIFT       8
530 
531 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
532 #define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
533 #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
534 #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
535 #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
536 #define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* priv. software exception */
537 #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
538 #define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */
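/*
 * Sketch of composing a VM-entry event-injection value from these bits,
 * here a #GP with error code (GP_VECTOR comes from processor.h; writing it
 * via ENT_INTR_INFO/ENT_INTR_ERROR below is purely illustrative):
 *
 *	u32 intr_info = GP_VECTOR | INTR_TYPE_HARD_EXCEPTION |
 *			INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK;
 *
 *	vmcs_write(ENT_INTR_ERROR, 0);
 *	vmcs_write(ENT_INTR_INFO, intr_info);
 */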
539 
540 /*
541  * Guest interruptibility state
542  */
543 #define GUEST_INTR_STATE_STI		(1 << 0)
544 #define GUEST_INTR_STATE_MOVSS		(1 << 1)
545 #define GUEST_INTR_STATE_SMI		(1 << 2)
546 #define GUEST_INTR_STATE_NMI		(1 << 3)
547 #define GUEST_INTR_STATE_ENCLAVE	(1 << 4)
548 
549 /*
550  * VM-instruction error numbers
551  */
552 enum vm_instruction_error_number {
553 	VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
554 	VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
555 	VMXERR_VMCLEAR_VMXON_POINTER = 3,
556 	VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
557 	VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
558 	VMXERR_VMRESUME_AFTER_VMXOFF = 6,
559 	VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
560 	VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
561 	VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
562 	VMXERR_VMPTRLD_VMXON_POINTER = 10,
563 	VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
564 	VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
565 	VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
566 	VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
567 	VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
568 	VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
569 	VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
570 	VMXERR_VMCALL_NONCLEAR_VMCS = 19,
571 	VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
572 	VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
573 	VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
574 	VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
575 	VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
576 	VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
577 	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
578 };
579 
580 enum vm_entry_failure_code {
581 	ENTRY_FAIL_DEFAULT		= 0,
582 	ENTRY_FAIL_PDPTE		= 2,
583 	ENTRY_FAIL_NMI			= 3,
584 	ENTRY_FAIL_VMCS_LINK_PTR	= 4,
585 };
586 
587 #define SAVE_GPR				\
588 	"xchg %rax, regs\n\t"			\
589 	"xchg %rcx, regs+0x8\n\t"		\
590 	"xchg %rdx, regs+0x10\n\t"		\
591 	"xchg %rbx, regs+0x18\n\t"		\
592 	"xchg %rbp, regs+0x28\n\t"		\
593 	"xchg %rsi, regs+0x30\n\t"		\
594 	"xchg %rdi, regs+0x38\n\t"		\
595 	"xchg %r8, regs+0x40\n\t"		\
596 	"xchg %r9, regs+0x48\n\t"		\
597 	"xchg %r10, regs+0x50\n\t"		\
598 	"xchg %r11, regs+0x58\n\t"		\
599 	"xchg %r12, regs+0x60\n\t"		\
600 	"xchg %r13, regs+0x68\n\t"		\
601 	"xchg %r14, regs+0x70\n\t"		\
602 	"xchg %r15, regs+0x78\n\t"
603 
604 #define LOAD_GPR	SAVE_GPR
605 
606 #define SAVE_GPR_C				\
607 	"xchg %%rax, regs\n\t"			\
608 	"xchg %%rcx, regs+0x8\n\t"		\
609 	"xchg %%rdx, regs+0x10\n\t"		\
610 	"xchg %%rbx, regs+0x18\n\t"		\
611 	"xchg %%rbp, regs+0x28\n\t"		\
612 	"xchg %%rsi, regs+0x30\n\t"		\
613 	"xchg %%rdi, regs+0x38\n\t"		\
614 	"xchg %%r8, regs+0x40\n\t"		\
615 	"xchg %%r9, regs+0x48\n\t"		\
616 	"xchg %%r10, regs+0x50\n\t"		\
617 	"xchg %%r11, regs+0x58\n\t"		\
618 	"xchg %%r12, regs+0x60\n\t"		\
619 	"xchg %%r13, regs+0x68\n\t"		\
620 	"xchg %%r14, regs+0x70\n\t"		\
621 	"xchg %%r15, regs+0x78\n\t"
622 
623 #define LOAD_GPR_C	SAVE_GPR_C
624 
625 #define VMX_IO_SIZE_MASK	0x7
626 #define _VMX_IO_BYTE		0
627 #define _VMX_IO_WORD		1
628 #define _VMX_IO_LONG		3
629 #define VMX_IO_DIRECTION_MASK	(1ul << 3)
630 #define VMX_IO_IN		(1ul << 3)
631 #define VMX_IO_OUT		0
632 #define VMX_IO_STRING		(1ul << 4)
633 #define VMX_IO_REP		(1ul << 5)
634 #define VMX_IO_OPRAND_IMM	(1ul << 6)
635 #define VMX_IO_PORT_MASK	0xFFFF0000
636 #define VMX_IO_PORT_SHIFT	16
637 
638 #define VMX_TEST_START		0
639 #define VMX_TEST_VMEXIT		1
640 #define VMX_TEST_EXIT		2
641 #define VMX_TEST_RESUME		3
642 #define VMX_TEST_VMABORT	4
643 #define VMX_TEST_VMSKIP		5
644 
645 #define HYPERCALL_BIT		(1ul << 12)
646 #define HYPERCALL_MASK		0xFFF
647 #define HYPERCALL_VMEXIT	0x1
648 #define HYPERCALL_VMABORT	0x2
649 #define HYPERCALL_VMSKIP	0x3
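/*
 * Hypercall numbers set HYPERCALL_BIT as a marker; the low HYPERCALL_MASK
 * bits select the operation (see hypercall() below).  A hypothetical
 * host-side check, as a sketch:
 *
 *	static bool is_abort_hypercall(u64 nr)
 *	{
 *		return (nr & HYPERCALL_BIT) &&
 *		       (nr & HYPERCALL_MASK) == HYPERCALL_VMABORT;
 *	}
 */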
650 
651 #define EPTP_PG_WALK_LEN_SHIFT	3ul
652 #define EPTP_PG_WALK_LEN_MASK	0x38ul
653 #define EPTP_RESERV_BITS_MASK	0x1ful
654 #define EPTP_RESERV_BITS_SHIFT	0x7ul
655 #define EPTP_AD_FLAG		(1ul << 6)
656 
657 #define EPT_MEM_TYPE_UC		0ul
658 #define EPT_MEM_TYPE_WC		1ul
659 #define EPT_MEM_TYPE_WT		4ul
660 #define EPT_MEM_TYPE_WP		5ul
661 #define EPT_MEM_TYPE_WB		6ul
662 
663 #define EPT_RA			1ul
664 #define EPT_WA			2ul
665 #define EPT_EA			4ul
666 #define EPT_PRESENT		(EPT_RA | EPT_WA | EPT_EA)
667 #define EPT_ACCESS_FLAG		(1ul << 8)
668 #define EPT_DIRTY_FLAG		(1ul << 9)
669 #define EPT_LARGE_PAGE		(1ul << 7)
670 #define EPT_MEM_TYPE_SHIFT	3ul
671 #define EPT_MEM_TYPE_MASK	0x7ul
672 #define EPT_IGNORE_PAT		(1ul << 6)
673 #define EPT_SUPPRESS_VE		(1ull << 63)
674 
675 #define EPT_CAP_EXEC_ONLY	(1ull << 0)
676 #define EPT_CAP_PWL4		(1ull << 6)
677 #define EPT_CAP_PWL5		(1ull << 7)
678 #define EPT_CAP_UC		(1ull << 8)
679 #define EPT_CAP_WB		(1ull << 14)
680 #define EPT_CAP_2M_PAGE		(1ull << 16)
681 #define EPT_CAP_1G_PAGE		(1ull << 17)
682 #define EPT_CAP_INVEPT		(1ull << 20)
683 #define EPT_CAP_AD_FLAG		(1ull << 21)
684 #define EPT_CAP_ADV_EPT_INFO	(1ull << 22)
685 #define EPT_CAP_INVEPT_SINGLE	(1ull << 25)
686 #define EPT_CAP_INVEPT_ALL	(1ull << 26)
687 #define VPID_CAP_INVVPID	(1ull << 32)
688 #define VPID_CAP_INVVPID_ADDR   (1ull << 40)
689 #define VPID_CAP_INVVPID_CXTGLB (1ull << 41)
690 #define VPID_CAP_INVVPID_ALL    (1ull << 42)
691 #define VPID_CAP_INVVPID_CXTLOC	(1ull << 43)
692 
693 #define PAGE_SIZE_2M		(512 * PAGE_SIZE)
694 #define PAGE_SIZE_1G		(512 * PAGE_SIZE_2M)
695 #define EPT_PAGE_LEVEL		4
696 #define EPT_PGDIR_WIDTH		9
697 #define EPT_PGDIR_MASK		511
698 #define EPT_PGDIR_ENTRIES	(1 << EPT_PGDIR_WIDTH)
699 #define EPT_LEVEL_SHIFT(level)	(((level)-1) * EPT_PGDIR_WIDTH + 12)
700 #define EPT_ADDR_MASK		GENMASK_ULL(51, 12)
701 #define PAGE_MASK_2M		(~(PAGE_SIZE_2M-1))
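/*
 * Each EPT level indexes EPT_PGDIR_WIDTH (9) bits of the guest-physical
 * address, so EPT_LEVEL_SHIFT() yields 12/21/30/39 for levels 1-4 (4K PTE,
 * 2M, 1G, PML4).  A hypothetical index helper, for illustration:
 *
 *	static unsigned long ept_index(unsigned long gpa, int level)
 *	{
 *		return (gpa >> EPT_LEVEL_SHIFT(level)) & EPT_PGDIR_MASK;
 *	}
 */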
702 
703 #define EPT_VLT_RD		(1ull << 0)
704 #define EPT_VLT_WR		(1ull << 1)
705 #define EPT_VLT_FETCH		(1ull << 2)
706 #define EPT_VLT_PERM_RD		(1ull << 3)
707 #define EPT_VLT_PERM_WR		(1ull << 4)
708 #define EPT_VLT_PERM_EX		(1ull << 5)
709 #define EPT_VLT_PERM_USER_EX	(1ull << 6)
710 #define EPT_VLT_PERMS		(EPT_VLT_PERM_RD | EPT_VLT_PERM_WR | \
711 				 EPT_VLT_PERM_EX)
712 #define EPT_VLT_LADDR_VLD	(1ull << 7)
713 #define EPT_VLT_PADDR		(1ull << 8)
714 #define EPT_VLT_GUEST_USER	(1ull << 9)
715 #define EPT_VLT_GUEST_RW	(1ull << 10)
716 #define EPT_VLT_GUEST_EX	(1ull << 11)
717 #define EPT_VLT_GUEST_MASK	(EPT_VLT_GUEST_USER | EPT_VLT_GUEST_RW | \
718 				 EPT_VLT_GUEST_EX)
719 
720 #define MAGIC_VAL_1		0x12345678ul
721 #define MAGIC_VAL_2		0x87654321ul
722 #define MAGIC_VAL_3		0xfffffffful
723 #define MAGIC_VAL_4		0xdeadbeeful
724 
725 #define INVEPT_SINGLE		1
726 #define INVEPT_GLOBAL		2
727 
728 #define INVVPID_ADDR            0
729 #define INVVPID_CONTEXT_GLOBAL	1
730 #define INVVPID_ALL		2
731 #define INVVPID_CONTEXT_LOCAL	3
732 
733 #define ACTV_ACTIVE		0
734 #define ACTV_HLT		1
735 #define ACTV_SHUTDOWN		2
736 #define ACTV_WAIT_SIPI		3
737 
738 /*
739  * VMCS field encoding:
740  * Bit 0: High-access
741  * Bits 1-9: Index
742  * Bits 10-12: Type
743  * Bits 13-14: Width
744  * Bits 15-63: Reserved
745  */
746 #define VMCS_FIELD_HIGH_SHIFT		(0)
747 #define VMCS_FIELD_INDEX_SHIFT		(1)
748 #define VMCS_FIELD_INDEX_MASK		GENMASK(9, 1)
749 #define VMCS_FIELD_TYPE_SHIFT		(10)
750 #define VMCS_FIELD_WIDTH_SHIFT		(13)
751 #define VMCS_FIELD_RESERVED_SHIFT	(15)
752 #define VMCS_FIELD_BIT_SIZE		(BITS_PER_LONG)
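/*
 * Worked example for the encoding described above: GUEST_RIP (0x681e)
 * decodes to access-type 0 (full field), index 15, type 2 (guest-state
 * area) and width 3 (natural width).  A hypothetical decode helper:
 *
 *	static void vmcs_field_decode(u64 enc)
 *	{
 *		printf("high=%lu index=%lu type=%lu width=%lu\n",
 *		       (enc >> VMCS_FIELD_HIGH_SHIFT) & 1,
 *		       (enc & VMCS_FIELD_INDEX_MASK) >> VMCS_FIELD_INDEX_SHIFT,
 *		       (enc >> VMCS_FIELD_TYPE_SHIFT) & 0x7,
 *		       (enc >> VMCS_FIELD_WIDTH_SHIFT) & 0x3);
 *	}
 */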
753 
754 extern struct regs regs;
755 
756 extern union vmx_basic_msr basic_msr;
757 extern union vmx_ctrl_msr ctrl_pin_rev;
758 extern union vmx_ctrl_msr ctrl_cpu_rev[2];
759 extern union vmx_ctrl_msr ctrl_exit_rev;
760 extern union vmx_ctrl_msr ctrl_enter_rev;
761 extern union vmx_ept_vpid  ept_vpid;
762 
763 static inline bool ept_2m_supported(void)
764 {
765 	return ept_vpid.val & EPT_CAP_2M_PAGE;
766 }
767 
768 static inline bool ept_1g_supported(void)
769 {
770 	return ept_vpid.val & EPT_CAP_1G_PAGE;
771 }
772 
773 static inline bool ept_huge_pages_supported(int level)
774 {
775 	if (level == 2)
776 		return ept_2m_supported();
777 	else if (level == 3)
778 		return ept_1g_supported();
779 	else
780 		return false;
781 }
782 
783 static inline bool ept_execute_only_supported(void)
784 {
785 	return ept_vpid.val & EPT_CAP_EXEC_ONLY;
786 }
787 
788 static inline bool ept_ad_bits_supported(void)
789 {
790 	return ept_vpid.val & EPT_CAP_AD_FLAG;
791 }
792 
793 static inline bool is_4_level_ept_supported(void)
794 {
795 	return ept_vpid.val & EPT_CAP_PWL4;
796 }
797 
798 static inline bool is_5_level_ept_supported(void)
799 {
800 	return ept_vpid.val & EPT_CAP_PWL5;
801 }
802 
803 static inline bool is_ept_memtype_supported(int type)
804 {
805 	if (type == EPT_MEM_TYPE_UC)
806 		return ept_vpid.val & EPT_CAP_UC;
807 
808 	if (type == EPT_MEM_TYPE_WB)
809 		return ept_vpid.val & EPT_CAP_WB;
810 
811 	return false;
812 }
813 
814 static inline bool is_invept_type_supported(u64 type)
815 {
816 	if (type < INVEPT_SINGLE || type > INVEPT_GLOBAL)
817 		return false;
818 
819 	return ept_vpid.val & (EPT_CAP_INVEPT_SINGLE << (type - INVEPT_SINGLE));
820 }
821 
822 static inline bool is_vpid_supported(void)
823 {
824 	return (ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
825 	       (ctrl_cpu_rev[1].clr & CPU_VPID);
826 }
827 
828 static inline bool is_invvpid_supported(void)
829 {
830 	return ept_vpid.val & VPID_CAP_INVVPID;
831 }
832 
833 static inline bool is_invvpid_type_supported(unsigned long type)
834 {
835 	if (type < INVVPID_ADDR || type > INVVPID_CONTEXT_LOCAL)
836 		return false;
837 
838 	return ept_vpid.val & (VPID_CAP_INVVPID_ADDR << (type - INVVPID_ADDR));
839 }
840 
841 extern u64 *bsp_vmxon_region;
842 extern bool launched;
843 
844 void vmx_set_test_stage(u32 s);
845 u32 vmx_get_test_stage(void);
846 void vmx_inc_test_stage(void);
847 
848 /* -1 on VM-Fail, 0 on success, >1 on fault */
849 static int __vmxon_safe(u64 *vmxon_region)
850 {
851 	bool vmfail;
852 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
853 
854 	asm volatile ("push %1\n\t"
855 		      "popf\n\t"
856 		      ASM_TRY("1f") "vmxon %2\n\t"
857 		      "setbe %0\n\t"
858 		      "jmp 2f\n\t"
859 		      "1: movb $0, %0\n\t"
860 		      "2:\n\t"
861 		      : "=q" (vmfail) : "q" (rflags), "m" (vmxon_region) : "cc");
862 
863 	if (vmfail)
864 		return -1;
865 
866 	return exception_vector();
867 }
868 
869 static int vmxon_safe(void)
870 {
871 	return __vmxon_safe(bsp_vmxon_region);
872 }
873 
874 static int vmx_on(void)
875 {
876 	return vmxon_safe();
877 }
878 
879 static int vmx_off(void)
880 {
881 	bool ret;
882 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
883 
884 	asm volatile("push %1; popf; vmxoff; setbe %0\n\t"
885 		     : "=q"(ret) : "q" (rflags) : "cc");
886 	return ret;
887 }
888 
889 static inline int make_vmcs_current(struct vmcs *vmcs)
890 {
891 	bool ret;
892 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
893 
894 	asm volatile ("push %1; popf; vmptrld %2; setbe %0"
895 		      : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
896 	return ret;
897 }
898 
899 static inline int vmcs_clear(struct vmcs *vmcs)
900 {
901 	bool ret;
902 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
903 
904 	asm volatile ("push %1; popf; vmclear %2; setbe %0"
905 		      : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
906 	return ret;
907 }
908 
909 static inline u64 vmcs_read(enum Encoding enc)
910 {
911 	u64 val;
912 	asm volatile ("vmread %1, %0" : "=rm" (val) : "r" ((u64)enc) : "cc");
913 	return val;
914 }
915 
916 /*
917  * VMREAD with a guaranteed memory operand, used to test KVM's MMU by forcing
918  * KVM to translate GVA->GPA.
919  */
920 static inline u64 vmcs_readm(enum Encoding enc)
921 {
922 	u64 val;
923 
924 	asm volatile ("vmread %1, %0" : "=m" (val) : "r" ((u64)enc) : "cc");
925 	return val;
926 }
927 
928 static inline int vmcs_read_safe(enum Encoding enc, u64 *value)
929 {
930 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
931 	u64 encoding = enc;
932 	u64 val;
933 
934 	asm volatile ("shl $8, %%rax;"
935 		      "sahf;"
936 		      "vmread %[encoding], %[val];"
937 		      "lahf;"
938 		      "shr $8, %%rax"
939 		      : /* output */ [val]"=rm"(val), "+a"(rflags)
940 		      : /* input */ [encoding]"r"(encoding)
941 		      : /* clobber */ "cc");
942 
943 	*value = val;
944 	return rflags & (X86_EFLAGS_CF | X86_EFLAGS_ZF);
945 }
946 
947 static inline int vmcs_write(enum Encoding enc, u64 val)
948 {
949 	bool ret;
950 	asm volatile ("vmwrite %1, %2; setbe %0"
951 		: "=q"(ret) : "rm" (val), "r" ((u64)enc) : "cc");
952 	return ret;
953 }
954 
955 static inline int vmcs_set_bits(enum Encoding enc, u64 val)
956 {
957 	return vmcs_write(enc, vmcs_read(enc) | val);
958 }
959 
960 static inline int vmcs_clear_bits(enum Encoding enc, u64 val)
961 {
962 	return vmcs_write(enc, vmcs_read(enc) & ~val);
963 }
964 
965 static inline int vmcs_save(struct vmcs **vmcs)
966 {
967 	bool ret;
968 	unsigned long pa;
969 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
970 
971 	asm volatile ("push %2; popf; vmptrst %1; setbe %0"
972 		      : "=q" (ret), "=m" (pa) : "r" (rflags) : "cc");
973 	*vmcs = (pa == -1ull) ? NULL : phys_to_virt(pa);
974 	return ret;
975 }
976 
977 static inline int __invept(unsigned long type, u64 eptp)
978 {
979 	bool failed = false;
980 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
981 
982 	struct {
983 		u64 eptp, gpa;
984 	} operand = {eptp, 0};
985 	asm volatile("push %1; popf; invept %2, %3; setbe %0"
986 		     : "=q" (failed) : "r" (rflags), "m"(operand),"r"(type) : "cc");
987 	return failed ? -1: 0;
988 }
989 
990 static inline void invept(unsigned long type, u64 eptp)
991 {
992 	__TEST_ASSERT(!__invept(type, eptp));
993 }
994 
995 static inline int __invvpid(unsigned long type, u64 vpid, u64 gla)
996 {
997 	bool failed = false;
998 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
999 
1000 	struct invvpid_operand operand = {vpid, gla};
1001 	asm volatile("push %1; popf; invvpid %2, %3; setbe %0"
1002 		     : "=q" (failed) : "r" (rflags), "m"(operand),"r"(type) : "cc");
1003 	return failed ? -1: 0;
1004 }
1005 
1006 static inline void invvpid(unsigned long type, u64 vpid, u64 gla)
1007 {
1008 	__TEST_ASSERT(!__invvpid(type, vpid, gla));
1009 }
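/*
 * invept()/invvpid() assert success, so callers normally gate them on the
 * capability helpers above.  Sketch (the flushed address guest_var is a
 * hypothetical guest variable):
 *
 *	if (is_invvpid_supported() && is_invvpid_type_supported(INVVPID_ADDR))
 *		invvpid(INVVPID_ADDR, 1, (u64)&guest_var);
 */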
1010 
1011 void enable_vmx(void);
1012 void init_vmx(u64 *vmxon_region);
1013 int init_vmcs(struct vmcs **vmcs);
1014 
1015 const char *exit_reason_description(u64 reason);
1016 void print_vmexit_info(union exit_reason exit_reason);
1017 void print_vmentry_failure_info(struct vmentry_result *result);
1018 void install_ept_entry(unsigned long *pml4, int pte_level,
1019 		unsigned long guest_addr, unsigned long pte,
1020 		unsigned long *pt_page);
1021 void install_1g_ept(unsigned long *pml4, unsigned long phys,
1022 		unsigned long guest_addr, u64 perm);
1023 void install_2m_ept(unsigned long *pml4, unsigned long phys,
1024 		unsigned long guest_addr, u64 perm);
1025 void install_ept(unsigned long *pml4, unsigned long phys,
1026 		unsigned long guest_addr, u64 perm);
1027 void setup_ept_range(unsigned long *pml4, unsigned long start,
1028 		     unsigned long len, int map_1g, int map_2m, u64 perm);
1029 bool get_ept_pte(unsigned long *pml4, unsigned long guest_addr, int level,
1030 		unsigned long *pte);
1031 void set_ept_pte(unsigned long *pml4, unsigned long guest_addr,
1032 		int level, u64 pte_val);
1033 void check_ept_ad(unsigned long *pml4, u64 guest_cr3,
1034 		  unsigned long guest_addr, int expected_gpa_ad,
1035 		  int expected_pt_ad);
1036 void clear_ept_ad(unsigned long *pml4, u64 guest_cr3,
1037 		  unsigned long guest_addr);
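/*
 * Minimal sketch of installing an identity 4K EPT mapping and reading the
 * leaf PTE back, assuming a hypothetical pml4 table and page allocated by
 * the test:
 *
 *	unsigned long pte;
 *
 *	install_ept(pml4, (unsigned long)page, (unsigned long)page,
 *		    EPT_RA | EPT_WA | EPT_EA);
 *	if (get_ept_pte(pml4, (unsigned long)page, 1, &pte))
 *		report(pte & EPT_PRESENT, "4K EPT mapping installed");
 */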
1038 
1039 #define        ABORT_ON_EARLY_VMENTRY_FAIL     0x1
1040 #define        ABORT_ON_INVALID_GUEST_STATE    0x2
1041 
1042 void __enter_guest(u8 abort_flag, struct vmentry_result *result);
1043 void enter_guest(void);
1044 void enter_guest_with_bad_controls(void);
1045 void hypercall(u32 hypercall_no);
1046 
1047 typedef void (*test_guest_func)(void);
1048 typedef void (*test_teardown_func)(void *data);
1049 void test_set_guest(test_guest_func func);
1050 void test_override_guest(test_guest_func func);
1051 void test_add_teardown(test_teardown_func func, void *data);
1052 void test_skip(const char *msg);
1053 void test_set_guest_finished(void);
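/*
 * Typical flow of a v2-style test body, sketched with hypothetical names
 * (loosely modeled on the tests in vmx_tests.c):
 *
 *	static void my_guest(void)
 *	{
 *		asm volatile("vmcall");
 *	}
 *
 *	static void my_v2_test(void)
 *	{
 *		test_set_guest(my_guest);
 *		enter_guest();
 *		report(vmcs_read(EXI_REASON) == VMX_VMCALL, "exited on vmcall");
 *	}
 */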
1054 
1055 #endif
1056