#ifndef X86_VMX_H
#define X86_VMX_H

#include "libcflat.h"
#include "processor.h"
#include "bitops.h"
#include "util.h"
#include "asm/page.h"
#include "asm/io.h"

void __abort_test(void);

#define __TEST_ASSERT(cond)					\
do {								\
	if (!(cond)) {						\
		report_fail("%s:%d: Assertion failed: %s",	\
			    __FILE__, __LINE__, #cond);		\
		dump_stack();					\
		__abort_test();					\
	}							\
} while (0)

#define TEST_ASSERT(cond)					\
do {								\
	__TEST_ASSERT(cond);					\
	report_passed();					\
} while (0)

#define TEST_ASSERT_MSG(cond, fmt, args...)			\
do {								\
	if (!(cond)) {						\
		report_fail("%s:%d: Assertion failed: %s\n" fmt,\
			    __FILE__, __LINE__, #cond, ##args);	\
		dump_stack();					\
		__abort_test();					\
	}							\
	report_passed();					\
} while (0)

#define TEST_ASSERT_EQ(a, b) __TEST_EQ(a, b, #a, #b, 1, __abort_test, "")
#define TEST_ASSERT_EQ_MSG(a, b, fmt, args...) \
	__TEST_EQ(a, b, #a, #b, 1, __abort_test, fmt, ## args)

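/*
 * Illustrative usage sketch (assumed helper, not referenced by any test):
 * on failure the TEST_ASSERT* macros log the failing condition with
 * file/line, dump the stack and abort the current test via __abort_test();
 * on success they log a pass.
 */
static inline void example_assert_usage(u64 value)
{
	TEST_ASSERT(value != 0);
	TEST_ASSERT_MSG(value < PAGE_SIZE, "expected an offset within one page");
}
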
struct vmcs_hdr {
	u32 revision_id:31;
	u32 shadow_vmcs:1;
};

struct vmcs {
	struct vmcs_hdr hdr;
	u32 abort; /* VMX-abort indicator */
	/* VMCS data */
	char data[0];
};

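/*
 * Minimal sketch (assumed helper): a VMCS region starts with the revision
 * identifier, whose top bit marks a shadow VMCS, followed by the VMX-abort
 * indicator and the implementation-specific data area.
 */
static inline bool example_vmcs_is_shadow(const struct vmcs *vmcs)
{
	return vmcs->hdr.shadow_vmcs;
}
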
struct invvpid_operand {
	u64 vpid;
	u64 gla;
};

struct regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 cr2;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u64 rflags;
};

union exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	reserved26		: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};

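/*
 * Decoding sketch (assumed helpers): the basic exit reason occupies the
 * low 16 bits of the exit-reason field, and bit 31 is set when a VM-entry
 * failed during or after loading guest state.
 */
static inline u16 example_exit_reason_basic(union exit_reason reason)
{
	return reason.basic;
}

static inline bool example_is_failed_vmentry(union exit_reason reason)
{
	return reason.failed_vmentry;
}
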
struct vmentry_result {
	/* Instruction mnemonic (for convenience). */
	const char *instr;
	/* Did the test attempt vmlaunch or vmresume? */
	bool vmlaunch;
	/* Did the instruction VM-Fail? */
	bool vm_fail;
	/* Did the VM-Entry fully enter the guest? */
	bool entered;
	/* VM-Exit reason, valid iff !vm_fail */
	union exit_reason exit_reason;
	/* Contents of [re]flags after failed entry. */
	unsigned long flags;
};

struct vmx_test {
	const char *name;
	int (*init)(struct vmcs *vmcs);
	void (*guest_main)(void);
	int (*exit_handler)(union exit_reason exit_reason);
	void (*syscall_handler)(u64 syscall_no);
	struct regs guest_regs;
	int (*entry_failure_handler)(struct vmentry_result *result);
	struct vmcs *vmcs;
	int exits;
	/* Alternative test interface. */
	void (*v2)(void);
};

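/*
 * Dispatch sketch (assumed helper): a test uses the alternative v2
 * interface when ->v2 is set; otherwise the framework drives it through
 * the legacy init/guest_main/exit_handler hooks.
 */
static inline bool example_vmx_test_is_v2(const struct vmx_test *test)
{
	return test->v2 != NULL;
}
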
union vmx_basic {
	u64 val;
	struct {
		u32 revision;
		u32	size:13,
			reserved1: 3,
			width:1,
			dual:1,
			type:4,
			insouts:1,
			ctrl:1,
			reserved2:8;
	};
};

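/*
 * Decoding sketch (assumed helper): IA32_VMX_BASIC carries the VMCS
 * revision identifier in its low 32 bits and the VMCS region size in
 * bytes in bits 44:32.
 */
static inline u32 example_vmx_basic_vmcs_size(union vmx_basic msr)
{
	return msr.size;
}
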
union vmx_ctrl_msr {
	u64 val;
	struct {
		u32 set, clr;
	};
};

union vmx_misc {
	u64 val;
	struct {
		u32 pt_bit:5,
		    stores_lma:1,
		    act_hlt:1,
		    act_shutdown:1,
		    act_wfsipi:1,
		    :5,
		    vmx_pt:1,
		    smm_smbase:1,
		    cr3_targets:9,
		    msr_list_size:3,
		    smm_mon_ctl:1,
		    vmwrite_any:1,
		    inject_len0:1,
		    :1;
		u32 mseg_revision;
	};
};

union vmx_ept_vpid {
	u64 val;
	struct {
		u32:16,
			super:2,
			: 2,
			invept:1,
			: 11;
		u32	invvpid:1;
	};
};

enum Encoding {
	/* 16-Bit Control Fields */
	VPID			= 0x0000ul,
	/* Posted-interrupt notification vector */
	PINV			= 0x0002ul,
	/* EPTP index */
	EPTP_IDX		= 0x0004ul,

	/* 16-Bit Guest State Fields */
	GUEST_SEL_ES		= 0x0800ul,
	GUEST_SEL_CS		= 0x0802ul,
	GUEST_SEL_SS		= 0x0804ul,
	GUEST_SEL_DS		= 0x0806ul,
	GUEST_SEL_FS		= 0x0808ul,
	GUEST_SEL_GS		= 0x080aul,
	GUEST_SEL_LDTR		= 0x080cul,
	GUEST_SEL_TR		= 0x080eul,
	GUEST_INT_STATUS	= 0x0810ul,
	GUEST_PML_INDEX         = 0x0812ul,

	/* 16-Bit Host State Fields */
	HOST_SEL_ES		= 0x0c00ul,
	HOST_SEL_CS		= 0x0c02ul,
	HOST_SEL_SS		= 0x0c04ul,
	HOST_SEL_DS		= 0x0c06ul,
	HOST_SEL_FS		= 0x0c08ul,
	HOST_SEL_GS		= 0x0c0aul,
	HOST_SEL_TR		= 0x0c0cul,

	/* 64-Bit Control Fields */
	IO_BITMAP_A		= 0x2000ul,
	IO_BITMAP_B		= 0x2002ul,
	MSR_BITMAP		= 0x2004ul,
	EXIT_MSR_ST_ADDR	= 0x2006ul,
	EXIT_MSR_LD_ADDR	= 0x2008ul,
	ENTER_MSR_LD_ADDR	= 0x200aul,
	VMCS_EXEC_PTR		= 0x200cul,
	TSC_OFFSET		= 0x2010ul,
	TSC_OFFSET_HI		= 0x2011ul,
	APIC_VIRT_ADDR		= 0x2012ul,
	APIC_ACCS_ADDR		= 0x2014ul,
	POSTED_INTR_DESC_ADDR	= 0x2016ul,
	EPTP			= 0x201aul,
	EPTP_HI			= 0x201bul,
	VMREAD_BITMAP           = 0x2026ul,
	VMREAD_BITMAP_HI        = 0x2027ul,
	VMWRITE_BITMAP          = 0x2028ul,
	VMWRITE_BITMAP_HI       = 0x2029ul,
	EOI_EXIT_BITMAP0	= 0x201cul,
	EOI_EXIT_BITMAP1	= 0x201eul,
	EOI_EXIT_BITMAP2	= 0x2020ul,
	EOI_EXIT_BITMAP3	= 0x2022ul,
	PMLADDR                 = 0x200eul,
	PMLADDR_HI              = 0x200ful,


	/* 64-Bit Readonly Data Field */
	INFO_PHYS_ADDR		= 0x2400ul,

	/* 64-Bit Guest State */
	VMCS_LINK_PTR		= 0x2800ul,
	VMCS_LINK_PTR_HI	= 0x2801ul,
	GUEST_DEBUGCTL		= 0x2802ul,
	GUEST_DEBUGCTL_HI	= 0x2803ul,
	GUEST_EFER		= 0x2806ul,
	GUEST_PAT		= 0x2804ul,
	GUEST_PERF_GLOBAL_CTRL	= 0x2808ul,
	GUEST_PDPTE		= 0x280aul,
	GUEST_BNDCFGS		= 0x2812ul,

	/* 64-Bit Host State */
	HOST_PAT		= 0x2c00ul,
	HOST_EFER		= 0x2c02ul,
	HOST_PERF_GLOBAL_CTRL	= 0x2c04ul,

	/* 32-Bit Control Fields */
	PIN_CONTROLS		= 0x4000ul,
	CPU_EXEC_CTRL0		= 0x4002ul,
	EXC_BITMAP		= 0x4004ul,
	PF_ERROR_MASK		= 0x4006ul,
	PF_ERROR_MATCH		= 0x4008ul,
	CR3_TARGET_COUNT	= 0x400aul,
	EXI_CONTROLS		= 0x400cul,
	EXI_MSR_ST_CNT		= 0x400eul,
	EXI_MSR_LD_CNT		= 0x4010ul,
	ENT_CONTROLS		= 0x4012ul,
	ENT_MSR_LD_CNT		= 0x4014ul,
	ENT_INTR_INFO		= 0x4016ul,
	ENT_INTR_ERROR		= 0x4018ul,
	ENT_INST_LEN		= 0x401aul,
	TPR_THRESHOLD		= 0x401cul,
	CPU_EXEC_CTRL1		= 0x401eul,

	/* 32-Bit R/O Data Fields */
	VMX_INST_ERROR		= 0x4400ul,
	EXI_REASON		= 0x4402ul,
	EXI_INTR_INFO		= 0x4404ul,
	EXI_INTR_ERROR		= 0x4406ul,
	IDT_VECT_INFO		= 0x4408ul,
	IDT_VECT_ERROR		= 0x440aul,
	EXI_INST_LEN		= 0x440cul,
	EXI_INST_INFO		= 0x440eul,

	/* 32-Bit Guest State Fields */
	GUEST_LIMIT_ES		= 0x4800ul,
	GUEST_LIMIT_CS		= 0x4802ul,
	GUEST_LIMIT_SS		= 0x4804ul,
	GUEST_LIMIT_DS		= 0x4806ul,
	GUEST_LIMIT_FS		= 0x4808ul,
	GUEST_LIMIT_GS		= 0x480aul,
	GUEST_LIMIT_LDTR	= 0x480cul,
	GUEST_LIMIT_TR		= 0x480eul,
	GUEST_LIMIT_GDTR	= 0x4810ul,
	GUEST_LIMIT_IDTR	= 0x4812ul,
	GUEST_AR_ES		= 0x4814ul,
	GUEST_AR_CS		= 0x4816ul,
	GUEST_AR_SS		= 0x4818ul,
	GUEST_AR_DS		= 0x481aul,
	GUEST_AR_FS		= 0x481cul,
	GUEST_AR_GS		= 0x481eul,
	GUEST_AR_LDTR		= 0x4820ul,
	GUEST_AR_TR		= 0x4822ul,
	GUEST_INTR_STATE	= 0x4824ul,
	GUEST_ACTV_STATE	= 0x4826ul,
	GUEST_SMBASE		= 0x4828ul,
	GUEST_SYSENTER_CS	= 0x482aul,
	PREEMPT_TIMER_VALUE	= 0x482eul,

	/* 32-Bit Host State Fields */
	HOST_SYSENTER_CS	= 0x4c00ul,

	/* Natural-Width Control Fields */
	CR0_MASK		= 0x6000ul,
	CR4_MASK		= 0x6002ul,
	CR0_READ_SHADOW		= 0x6004ul,
	CR4_READ_SHADOW		= 0x6006ul,
	CR3_TARGET_0		= 0x6008ul,
	CR3_TARGET_1		= 0x600aul,
	CR3_TARGET_2		= 0x600cul,
	CR3_TARGET_3		= 0x600eul,

	/* Natural-Width R/O Data Fields */
	EXI_QUALIFICATION	= 0x6400ul,
	IO_RCX			= 0x6402ul,
	IO_RSI			= 0x6404ul,
	IO_RDI			= 0x6406ul,
	IO_RIP			= 0x6408ul,
	GUEST_LINEAR_ADDRESS	= 0x640aul,

	/* Natural-Width Guest State Fields */
	GUEST_CR0		= 0x6800ul,
	GUEST_CR3		= 0x6802ul,
	GUEST_CR4		= 0x6804ul,
	GUEST_BASE_ES		= 0x6806ul,
	GUEST_BASE_CS		= 0x6808ul,
	GUEST_BASE_SS		= 0x680aul,
	GUEST_BASE_DS		= 0x680cul,
	GUEST_BASE_FS		= 0x680eul,
	GUEST_BASE_GS		= 0x6810ul,
	GUEST_BASE_LDTR		= 0x6812ul,
	GUEST_BASE_TR		= 0x6814ul,
	GUEST_BASE_GDTR		= 0x6816ul,
	GUEST_BASE_IDTR		= 0x6818ul,
	GUEST_DR7		= 0x681aul,
	GUEST_RSP		= 0x681cul,
	GUEST_RIP		= 0x681eul,
	GUEST_RFLAGS		= 0x6820ul,
	GUEST_PENDING_DEBUG	= 0x6822ul,
	GUEST_SYSENTER_ESP	= 0x6824ul,
	GUEST_SYSENTER_EIP	= 0x6826ul,

	/* Natural-Width Host State Fields */
	HOST_CR0		= 0x6c00ul,
	HOST_CR3		= 0x6c02ul,
	HOST_CR4		= 0x6c04ul,
	HOST_BASE_FS		= 0x6c06ul,
	HOST_BASE_GS		= 0x6c08ul,
	HOST_BASE_TR		= 0x6c0aul,
	HOST_BASE_GDTR		= 0x6c0cul,
	HOST_BASE_IDTR		= 0x6c0eul,
	HOST_SYSENTER_ESP	= 0x6c10ul,
	HOST_SYSENTER_EIP	= 0x6c12ul,
	HOST_RSP		= 0x6c14ul,
	HOST_RIP		= 0x6c16ul
};

#define VMX_ENTRY_FAILURE	(1ul << 31)
#define VMX_ENTRY_FLAGS		(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
				 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

enum Reason {
	VMX_EXC_NMI		= 0,
	VMX_EXTINT		= 1,
	VMX_TRIPLE_FAULT	= 2,
	VMX_INIT		= 3,
	VMX_SIPI		= 4,
	VMX_SMI_IO		= 5,
	VMX_SMI_OTHER		= 6,
	VMX_INTR_WINDOW		= 7,
	VMX_NMI_WINDOW		= 8,
	VMX_TASK_SWITCH		= 9,
	VMX_CPUID		= 10,
	VMX_GETSEC		= 11,
	VMX_HLT			= 12,
	VMX_INVD		= 13,
	VMX_INVLPG		= 14,
	VMX_RDPMC		= 15,
	VMX_RDTSC		= 16,
	VMX_RSM			= 17,
	VMX_VMCALL		= 18,
	VMX_VMCLEAR		= 19,
	VMX_VMLAUNCH		= 20,
	VMX_VMPTRLD		= 21,
	VMX_VMPTRST		= 22,
	VMX_VMREAD		= 23,
	VMX_VMRESUME		= 24,
	VMX_VMWRITE		= 25,
	VMX_VMXOFF		= 26,
	VMX_VMXON		= 27,
	VMX_CR			= 28,
	VMX_DR			= 29,
	VMX_IO			= 30,
	VMX_RDMSR		= 31,
	VMX_WRMSR		= 32,
	VMX_FAIL_STATE		= 33,
	VMX_FAIL_MSR		= 34,
	VMX_MWAIT		= 36,
	VMX_MTF			= 37,
	VMX_MONITOR		= 39,
	VMX_PAUSE		= 40,
	VMX_FAIL_MCHECK		= 41,
	VMX_TPR_THRESHOLD	= 43,
	VMX_APIC_ACCESS		= 44,
	VMX_EOI_INDUCED		= 45,
	VMX_GDTR_IDTR		= 46,
	VMX_LDTR_TR		= 47,
	VMX_EPT_VIOLATION	= 48,
	VMX_EPT_MISCONFIG	= 49,
	VMX_INVEPT		= 50,
	VMX_PREEMPT		= 52,
	VMX_INVVPID		= 53,
	VMX_WBINVD		= 54,
	VMX_XSETBV		= 55,
	VMX_APIC_WRITE		= 56,
	VMX_RDRAND		= 57,
	VMX_INVPCID		= 58,
	VMX_VMFUNC		= 59,
	VMX_RDSEED		= 61,
	VMX_PML_FULL		= 62,
	VMX_XSAVES		= 63,
	VMX_XRSTORS		= 64,
};

enum Ctrl_exi {
	EXI_SAVE_DBGCTLS	= 1UL << 2,
	EXI_HOST_64		= 1UL << 9,
	EXI_LOAD_PERF		= 1UL << 12,
	EXI_INTA		= 1UL << 15,
	EXI_SAVE_PAT		= 1UL << 18,
	EXI_LOAD_PAT		= 1UL << 19,
	EXI_SAVE_EFER		= 1UL << 20,
	EXI_LOAD_EFER		= 1UL << 21,
	EXI_SAVE_PREEMPT	= 1UL << 22,
};

enum Ctrl_ent {
	ENT_LOAD_DBGCTLS	= 1UL << 2,
	ENT_GUEST_64		= 1UL << 9,
	ENT_LOAD_PERF		= 1UL << 13,
	ENT_LOAD_PAT		= 1UL << 14,
	ENT_LOAD_EFER		= 1UL << 15,
	ENT_LOAD_BNDCFGS	= 1UL << 16
};

enum Ctrl_pin {
	PIN_EXTINT		= 1ul << 0,
	PIN_NMI			= 1ul << 3,
	PIN_VIRT_NMI		= 1ul << 5,
	PIN_PREEMPT		= 1ul << 6,
	PIN_POST_INTR		= 1ul << 7,
};

enum Ctrl0 {
	CPU_INTR_WINDOW		= 1ul << 2,
	CPU_USE_TSC_OFFSET	= 1ul << 3,
	CPU_HLT			= 1ul << 7,
	CPU_INVLPG		= 1ul << 9,
	CPU_MWAIT		= 1ul << 10,
	CPU_RDPMC		= 1ul << 11,
	CPU_RDTSC		= 1ul << 12,
	CPU_CR3_LOAD		= 1ul << 15,
	CPU_CR3_STORE		= 1ul << 16,
	CPU_CR8_LOAD		= 1ul << 19,
	CPU_CR8_STORE		= 1ul << 20,
	CPU_TPR_SHADOW		= 1ul << 21,
	CPU_NMI_WINDOW		= 1ul << 22,
	CPU_IO			= 1ul << 24,
	CPU_IO_BITMAP		= 1ul << 25,
	CPU_MTF			= 1ul << 27,
	CPU_MSR_BITMAP		= 1ul << 28,
	CPU_MONITOR		= 1ul << 29,
	CPU_PAUSE		= 1ul << 30,
	CPU_SECONDARY		= 1ul << 31,
};

enum Ctrl1 {
	CPU_VIRT_APIC_ACCESSES	= 1ul << 0,
	CPU_EPT			= 1ul << 1,
	CPU_DESC_TABLE		= 1ul << 2,
	CPU_RDTSCP		= 1ul << 3,
	CPU_VIRT_X2APIC		= 1ul << 4,
	CPU_VPID		= 1ul << 5,
	CPU_WBINVD		= 1ul << 6,
	CPU_URG			= 1ul << 7,
	CPU_APIC_REG_VIRT	= 1ul << 8,
	CPU_VINTD		= 1ul << 9,
	CPU_RDRAND		= 1ul << 11,
	CPU_SHADOW_VMCS		= 1ul << 14,
	CPU_RDSEED		= 1ul << 16,
	CPU_PML                 = 1ul << 17,
	CPU_USE_TSC_SCALING	= 1ul << 25,
};

enum Intr_type {
	VMX_INTR_TYPE_EXT_INTR = 0,
	VMX_INTR_TYPE_NMI_INTR = 2,
	VMX_INTR_TYPE_HARD_EXCEPTION = 3,
	VMX_INTR_TYPE_SOFT_INTR = 4,
	VMX_INTR_TYPE_SOFT_EXCEPTION = 6,
};

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
#define INTR_INFO_UNBLOCK_NMI_MASK      0x1000          /* 12 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define INTR_INFO_INTR_TYPE_SHIFT       8

#define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
#define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
#define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* priv. software exception */
#define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
#define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */

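/*
 * Construction sketch (assumed helper): an interruption-information or
 * event-injection value combines the vector in bits 7:0, one of the
 * INTR_TYPE_* values in bits 10:8 and the valid bit; such a value can be
 * written to ENT_INTR_INFO to inject an event on VM-entry.
 */
static inline u32 example_intr_info(u32 intr_type, u8 vector)
{
	return INTR_INFO_VALID_MASK | intr_type | vector;
}
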
/*
 * Guest interruptibility state
 */
#define GUEST_INTR_STATE_STI		(1 << 0)
#define GUEST_INTR_STATE_MOVSS		(1 << 1)
#define GUEST_INTR_STATE_SMI		(1 << 2)
#define GUEST_INTR_STATE_NMI		(1 << 3)
#define GUEST_INTR_STATE_ENCLAVE	(1 << 4)

/*
 * VM-instruction error numbers
 */
enum vm_instruction_error_number {
	VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
	VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
	VMXERR_VMCLEAR_VMXON_POINTER = 3,
	VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
	VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
	VMXERR_VMRESUME_AFTER_VMXOFF = 6,
	VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
	VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
	VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
	VMXERR_VMPTRLD_VMXON_POINTER = 10,
	VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
	VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
	VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
	VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
	VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
	VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
	VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
	VMXERR_VMCALL_NONCLEAR_VMCS = 19,
	VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
	VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
	VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
	VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
	VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
	VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};

enum vm_entry_failure_code {
	ENTRY_FAIL_DEFAULT		= 0,
	ENTRY_FAIL_PDPTE		= 2,
	ENTRY_FAIL_NMI			= 3,
	ENTRY_FAIL_VMCS_LINK_PTR	= 4,
};

#define SAVE_GPR				\
	"xchg %rax, regs\n\t"			\
	"xchg %rcx, regs+0x8\n\t"		\
	"xchg %rdx, regs+0x10\n\t"		\
	"xchg %rbx, regs+0x18\n\t"		\
	"xchg %rbp, regs+0x28\n\t"		\
	"xchg %rsi, regs+0x30\n\t"		\
	"xchg %rdi, regs+0x38\n\t"		\
	"xchg %r8, regs+0x40\n\t"		\
	"xchg %r9, regs+0x48\n\t"		\
	"xchg %r10, regs+0x50\n\t"		\
	"xchg %r11, regs+0x58\n\t"		\
	"xchg %r12, regs+0x60\n\t"		\
	"xchg %r13, regs+0x68\n\t"		\
	"xchg %r14, regs+0x70\n\t"		\
	"xchg %r15, regs+0x78\n\t"

#define LOAD_GPR	SAVE_GPR

#define SAVE_GPR_C				\
	"xchg %%rax, regs\n\t"			\
	"xchg %%rcx, regs+0x8\n\t"		\
	"xchg %%rdx, regs+0x10\n\t"		\
	"xchg %%rbx, regs+0x18\n\t"		\
	"xchg %%rbp, regs+0x28\n\t"		\
	"xchg %%rsi, regs+0x30\n\t"		\
	"xchg %%rdi, regs+0x38\n\t"		\
	"xchg %%r8, regs+0x40\n\t"		\
	"xchg %%r9, regs+0x48\n\t"		\
	"xchg %%r10, regs+0x50\n\t"		\
	"xchg %%r11, regs+0x58\n\t"		\
	"xchg %%r12, regs+0x60\n\t"		\
	"xchg %%r13, regs+0x68\n\t"		\
	"xchg %%r14, regs+0x70\n\t"		\
	"xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C	SAVE_GPR_C

#define VMX_IO_SIZE_MASK	0x7
#define _VMX_IO_BYTE		0
#define _VMX_IO_WORD		1
#define _VMX_IO_LONG		3
#define VMX_IO_DIRECTION_MASK	(1ul << 3)
#define VMX_IO_IN		(1ul << 3)
#define VMX_IO_OUT		0
#define VMX_IO_STRING		(1ul << 4)
#define VMX_IO_REP		(1ul << 5)
#define VMX_IO_OPRAND_IMM	(1ul << 6)
#define VMX_IO_PORT_MASK	0xFFFF0000
#define VMX_IO_PORT_SHIFT	16

#define VMX_TEST_START		0
#define VMX_TEST_VMEXIT		1
#define VMX_TEST_EXIT		2
#define VMX_TEST_RESUME		3
#define VMX_TEST_VMABORT	4
#define VMX_TEST_VMSKIP		5

#define HYPERCALL_BIT		(1ul << 12)
#define HYPERCALL_MASK		0xFFF
#define HYPERCALL_VMEXIT	0x1
#define HYPERCALL_VMABORT	0x2
#define HYPERCALL_VMSKIP	0x3

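/*
 * Decoding sketch (assumed helper): the guest signals the host by
 * executing vmcall with HYPERCALL_BIT set in RAX and one of the
 * HYPERCALL_* numbers in the low 12 bits; an exit handler can recover
 * the call number like this.
 */
static inline u32 example_hypercall_nr(u64 guest_rax)
{
	return (guest_rax & HYPERCALL_BIT) ? (guest_rax & HYPERCALL_MASK) : 0;
}
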
#define EPTP_PG_WALK_LEN_SHIFT	3ul
#define EPTP_PG_WALK_LEN_MASK	0x38ul
#define EPTP_RESERV_BITS_MASK	0x1ful
#define EPTP_RESERV_BITS_SHIFT	0x7ul
#define EPTP_AD_FLAG		(1ul << 6)

#define EPT_MEM_TYPE_UC		0ul
#define EPT_MEM_TYPE_WC		1ul
#define EPT_MEM_TYPE_WT		4ul
#define EPT_MEM_TYPE_WP		5ul
#define EPT_MEM_TYPE_WB		6ul

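/*
 * Construction sketch (assumed helper and caller-supplied PML4 address):
 * a basic EPTP combines the physical address of the top-level paging
 * structure, the page-walk length minus one in bits 5:3 (3 for a
 * four-level walk) and a memory type in bits 2:0.
 */
static inline u64 example_eptp(u64 pml4_pa)
{
	return pml4_pa | (3ul << EPTP_PG_WALK_LEN_SHIFT) | EPT_MEM_TYPE_WB;
}
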
#define EPT_RA			1ul
#define EPT_WA			2ul
#define EPT_EA			4ul
#define EPT_PRESENT		(EPT_RA | EPT_WA | EPT_EA)
#define EPT_ACCESS_FLAG		(1ul << 8)
#define EPT_DIRTY_FLAG		(1ul << 9)
#define EPT_LARGE_PAGE		(1ul << 7)
#define EPT_MEM_TYPE_SHIFT	3ul
#define EPT_MEM_TYPE_MASK	0x7ul
#define EPT_IGNORE_PAT		(1ul << 6)
#define EPT_SUPPRESS_VE		(1ull << 63)

#define EPT_CAP_EXEC_ONLY	(1ull << 0)
#define EPT_CAP_PWL4		(1ull << 6)
#define EPT_CAP_PWL5		(1ull << 7)
#define EPT_CAP_UC		(1ull << 8)
#define EPT_CAP_WB		(1ull << 14)
#define EPT_CAP_2M_PAGE		(1ull << 16)
#define EPT_CAP_1G_PAGE		(1ull << 17)
#define EPT_CAP_INVEPT		(1ull << 20)
#define EPT_CAP_AD_FLAG		(1ull << 21)
#define EPT_CAP_ADV_EPT_INFO	(1ull << 22)
#define EPT_CAP_INVEPT_SINGLE	(1ull << 25)
#define EPT_CAP_INVEPT_ALL	(1ull << 26)
#define VPID_CAP_INVVPID	(1ull << 32)
#define VPID_CAP_INVVPID_ADDR   (1ull << 40)
#define VPID_CAP_INVVPID_CXTGLB (1ull << 41)
#define VPID_CAP_INVVPID_ALL    (1ull << 42)
#define VPID_CAP_INVVPID_CXTLOC	(1ull << 43)

#define PAGE_SIZE_2M		(512 * PAGE_SIZE)
#define PAGE_SIZE_1G		(512 * PAGE_SIZE_2M)
#define EPT_PAGE_LEVEL		4
#define EPT_PGDIR_WIDTH		9
#define EPT_PGDIR_MASK		511
#define EPT_PGDIR_ENTRIES	(1 << EPT_PGDIR_WIDTH)
#define EPT_LEVEL_SHIFT(level)	(((level)-1) * EPT_PGDIR_WIDTH + 12)
#define EPT_ADDR_MASK		GENMASK_ULL(51, 12)
#define PAGE_MASK_2M		(~(PAGE_SIZE_2M-1))

#define EPT_VLT_RD		(1ull << 0)
#define EPT_VLT_WR		(1ull << 1)
#define EPT_VLT_FETCH		(1ull << 2)
#define EPT_VLT_PERM_RD		(1ull << 3)
#define EPT_VLT_PERM_WR		(1ull << 4)
#define EPT_VLT_PERM_EX		(1ull << 5)
#define EPT_VLT_PERM_USER_EX	(1ull << 6)
#define EPT_VLT_PERMS		(EPT_VLT_PERM_RD | EPT_VLT_PERM_WR | \
				 EPT_VLT_PERM_EX)
#define EPT_VLT_LADDR_VLD	(1ull << 7)
#define EPT_VLT_PADDR		(1ull << 8)
#define EPT_VLT_GUEST_USER	(1ull << 9)
#define EPT_VLT_GUEST_RW	(1ull << 10)
#define EPT_VLT_GUEST_EX	(1ull << 11)
#define EPT_VLT_GUEST_MASK	(EPT_VLT_GUEST_USER | EPT_VLT_GUEST_RW | \
				 EPT_VLT_GUEST_EX)

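/*
 * Decoding sketch (assumed helper): the exit qualification of an EPT
 * violation reports the access type in bits 2:0 (EPT_VLT_RD/WR/FETCH),
 * the permissions of the faulting translation in bits 5:3, and in bit 7
 * whether the guest linear address field is valid.
 */
static inline bool example_ept_violation_is_write(u64 qualification)
{
	return qualification & EPT_VLT_WR;
}
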
#define MAGIC_VAL_1		0x12345678ul
#define MAGIC_VAL_2		0x87654321ul
#define MAGIC_VAL_3		0xfffffffful
#define MAGIC_VAL_4		0xdeadbeeful

#define INVEPT_SINGLE		1
#define INVEPT_GLOBAL		2

#define INVVPID_ADDR            0
#define INVVPID_CONTEXT_GLOBAL	1
#define INVVPID_ALL		2
#define INVVPID_CONTEXT_LOCAL	3

#define ACTV_ACTIVE		0
#define ACTV_HLT		1
#define ACTV_SHUTDOWN		2
#define ACTV_WAIT_SIPI		3

/*
 * VMCS field encoding:
 * Bit 0: Access type (0 = full, 1 = high 32 bits of a 64-bit field)
 * Bits 1-9: Index
 * Bits 10-11: Type
 * Bit 12: Reserved (must be zero)
 * Bits 13-14: Width
 * Bits 15-63: Reserved
 */
#define VMCS_FIELD_HIGH_SHIFT		(0)
#define VMCS_FIELD_INDEX_SHIFT		(1)
#define VMCS_FIELD_INDEX_MASK		GENMASK(9, 1)
#define VMCS_FIELD_TYPE_SHIFT		(10)
#define VMCS_FIELD_WIDTH_SHIFT		(13)
#define VMCS_FIELD_RESERVED_SHIFT	(15)
#define VMCS_FIELD_BIT_SIZE		(BITS_PER_LONG)

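/*
 * Decoding sketch (assumed helper): pull the index portion out of a VMCS
 * field encoding, e.g. when iterating over all fields of a given width
 * and type.
 */
static inline unsigned long example_vmcs_field_index(unsigned long encoding)
{
	return (encoding & VMCS_FIELD_INDEX_MASK) >> VMCS_FIELD_INDEX_SHIFT;
}
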
extern struct regs regs;

extern union vmx_basic basic;
extern union vmx_ctrl_msr ctrl_pin_rev;
extern union vmx_ctrl_msr ctrl_cpu_rev[2];
extern union vmx_ctrl_msr ctrl_exit_rev;
extern union vmx_ctrl_msr ctrl_enter_rev;
extern union vmx_ept_vpid  ept_vpid;

static inline bool ept_2m_supported(void)
{
	return ept_vpid.val & EPT_CAP_2M_PAGE;
}

static inline bool ept_1g_supported(void)
{
	return ept_vpid.val & EPT_CAP_1G_PAGE;
}

static inline bool ept_huge_pages_supported(int level)
{
	if (level == 2)
		return ept_2m_supported();
	else if (level == 3)
		return ept_1g_supported();
	else
		return false;
}

static inline bool ept_execute_only_supported(void)
{
	return ept_vpid.val & EPT_CAP_EXEC_ONLY;
}

static inline bool ept_ad_bits_supported(void)
{
	return ept_vpid.val & EPT_CAP_AD_FLAG;
}

static inline bool is_4_level_ept_supported(void)
{
	return ept_vpid.val & EPT_CAP_PWL4;
}

static inline bool is_5_level_ept_supported(void)
{
	return ept_vpid.val & EPT_CAP_PWL5;
}

static inline bool is_ept_memtype_supported(int type)
{
	if (type == EPT_MEM_TYPE_UC)
		return ept_vpid.val & EPT_CAP_UC;

	if (type == EPT_MEM_TYPE_WB)
		return ept_vpid.val & EPT_CAP_WB;

	return false;
}

static inline bool is_invept_type_supported(u64 type)
{
	if (type < INVEPT_SINGLE || type > INVEPT_GLOBAL)
		return false;

	return ept_vpid.val & (EPT_CAP_INVEPT_SINGLE << (type - INVEPT_SINGLE));
}

static inline bool is_vpid_supported(void)
{
	return (ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
	       (ctrl_cpu_rev[1].clr & CPU_VPID);
}

static inline bool is_invvpid_supported(void)
{
	return ept_vpid.val & VPID_CAP_INVVPID;
}

static inline bool is_invvpid_type_supported(unsigned long type)
{
	if (type < INVVPID_ADDR || type > INVVPID_CONTEXT_LOCAL)
		return false;

	return ept_vpid.val & (VPID_CAP_INVVPID_ADDR << (type - INVVPID_ADDR));
}

extern u64 *bsp_vmxon_region;
extern bool launched;

void vmx_set_test_stage(u32 s);
u32 vmx_get_test_stage(void);
void vmx_inc_test_stage(void);

/* -1 on VM-Fail, 0 on success, >1 on fault */
static int __vmxon_safe(u64 *vmxon_region)
{
	bool vmfail;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %1\n\t"
		      "popf\n\t"
		      ASM_TRY("1f") "vmxon %2\n\t"
		      "setbe %0\n\t"
		      "jmp 2f\n\t"
		      "1: movb $0, %0\n\t"
		      "2:\n\t"
		      : "=q" (vmfail) : "q" (rflags), "m" (vmxon_region) : "cc");

	if (vmfail)
		return -1;

	return exception_vector();
}

static int vmxon_safe(void)
{
	return __vmxon_safe(bsp_vmxon_region);
}

static int vmx_on(void)
{
	return vmxon_safe();
}

static int vmx_off(void)
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile("push %1; popf; vmxoff; setbe %0\n\t"
		     : "=q"(ret) : "q" (rflags) : "cc");
	return ret;
}

static inline int make_vmcs_current(struct vmcs *vmcs)
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %1; popf; vmptrld %2; setbe %0"
		      : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
	return ret;
}

static inline int vmcs_clear(struct vmcs *vmcs)
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %1; popf; vmclear %2; setbe %0"
		      : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
	return ret;
}

static inline u64 vmcs_read(enum Encoding enc)
{
	u64 val;
	asm volatile ("vmread %1, %0" : "=rm" (val) : "r" ((u64)enc) : "cc");
	return val;
}

/*
 * VMREAD with a guaranteed memory operand, used to test KVM's MMU by forcing
 * KVM to translate GVA->GPA.
 */
static inline u64 vmcs_readm(enum Encoding enc)
{
	u64 val;

	asm volatile ("vmread %1, %0" : "=m" (val) : "r" ((u64)enc) : "cc");
	return val;
}

static inline int vmcs_read_safe(enum Encoding enc, u64 *value)
{
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
	u64 encoding = enc;
	u64 val;

	asm volatile ("shl $8, %%rax;"
		      "sahf;"
		      "vmread %[encoding], %[val];"
		      "lahf;"
		      "shr $8, %%rax"
		      : /* output */ [val]"=rm"(val), "+a"(rflags)
		      : /* input */ [encoding]"r"(encoding)
		      : /* clobber */ "cc");

	*value = val;
	return rflags & (X86_EFLAGS_CF | X86_EFLAGS_ZF);
}

static inline int vmcs_write(enum Encoding enc, u64 val)
{
	bool ret;
	asm volatile ("vmwrite %1, %2; setbe %0"
		: "=q"(ret) : "rm" (val), "r" ((u64)enc) : "cc");
	return ret;
}

static inline int vmcs_set_bits(enum Encoding enc, u64 val)
{
	return vmcs_write(enc, vmcs_read(enc) | val);
}

static inline int vmcs_clear_bits(enum Encoding enc, u64 val)
{
	return vmcs_write(enc, vmcs_read(enc) & ~val);
}

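/*
 * Usage sketch (assumed helper): VMCS fields are accessed by their
 * encoding from enum Encoding, e.g. enabling HLT exiting in the primary
 * processor-based controls of the current VMCS.
 */
static inline void example_enable_hlt_exiting(void)
{
	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_HLT);
}
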
static inline int vmcs_save(struct vmcs **vmcs)
{
	bool ret;
	unsigned long pa;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %2; popf; vmptrst %1; setbe %0"
		      : "=q" (ret), "=m" (pa) : "r" (rflags) : "cc");
	*vmcs = (pa == -1ull) ? NULL : phys_to_virt(pa);
	return ret;
}

static inline int __invept(unsigned long type, u64 eptp)
{
	bool failed = false;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	struct {
		u64 eptp, gpa;
	} operand = {eptp, 0};
	asm volatile("push %1; popf; invept %2, %3; setbe %0"
		     : "=q" (failed) : "r" (rflags), "m"(operand),"r"(type) : "cc");
	return failed ? -1: 0;
}

static inline void invept(unsigned long type, u64 eptp)
{
	__TEST_ASSERT(!__invept(type, eptp));
}

static inline int __invvpid(unsigned long type, u64 vpid, u64 gla)
{
	bool failed = false;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	struct invvpid_operand operand = {vpid, gla};
	asm volatile("push %1; popf; invvpid %2, %3; setbe %0"
		     : "=q" (failed) : "r" (rflags), "m"(operand),"r"(type) : "cc");
	return failed ? -1: 0;
}

static inline void invvpid(unsigned long type, u64 vpid, u64 gla)
{
	__TEST_ASSERT(!__invvpid(type, vpid, gla));
}

void enable_vmx(void);
void init_vmx(u64 *vmxon_region);
int init_vmcs(struct vmcs **vmcs);

const char *exit_reason_description(u64 reason);
void print_vmexit_info(union exit_reason exit_reason);
void print_vmentry_failure_info(struct vmentry_result *result);
void install_ept_entry(unsigned long *pml4, int pte_level,
		unsigned long guest_addr, unsigned long pte,
		unsigned long *pt_page);
void install_1g_ept(unsigned long *pml4, unsigned long phys,
		unsigned long guest_addr, u64 perm);
void install_2m_ept(unsigned long *pml4, unsigned long phys,
		unsigned long guest_addr, u64 perm);
void install_ept(unsigned long *pml4, unsigned long phys,
		unsigned long guest_addr, u64 perm);
void setup_ept_range(unsigned long *pml4, unsigned long start,
		     unsigned long len, int map_1g, int map_2m, u64 perm);
bool get_ept_pte(unsigned long *pml4, unsigned long guest_addr, int level,
		unsigned long *pte);
void set_ept_pte(unsigned long *pml4, unsigned long guest_addr,
		int level, u64 pte_val);
void check_ept_ad(unsigned long *pml4, u64 guest_cr3,
		  unsigned long guest_addr, int expected_gpa_ad,
		  int expected_pt_ad);
void clear_ept_ad(unsigned long *pml4, u64 guest_cr3,
		  unsigned long guest_addr);

#define ABORT_ON_EARLY_VMENTRY_FAIL	0x1
#define ABORT_ON_INVALID_GUEST_STATE	0x2

void __enter_guest(u8 abort_flag, struct vmentry_result *result);
void enter_guest(void);
void enter_guest_with_bad_controls(void);
void hypercall(u32 hypercall_no);

typedef void (*test_guest_func)(void);
typedef void (*test_teardown_func)(void *data);
void test_set_guest(test_guest_func func);
void test_override_guest(test_guest_func func);
void test_add_teardown(test_teardown_func func, void *data);
void test_skip(const char *msg);
void test_set_guest_finished(void);

#endif