/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/ioctl.h>

#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)		\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)			\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)	\
		if (!((vcpu) = vm->vcpus[i]))		\
			continue;			\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48bits VA but ANY bits PA */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t  mode;
	uint8_t  pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT			0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

int get_kvm_param_integer(const char *param);
int get_kvm_intel_param_integer(const char *param);
int get_kvm_amd_param_integer(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error.  The "outer" macro is strongly preferred when reporting
 * errors "directly", i.e. without an additional layer of macros, as it reduces
 * the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
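
/*
 * Illustrative usage (a sketch, not part of this header): the outer macro
 * stringifies the ioctl name itself, whereas the inner macro takes an
 * already-stringified name, e.g. a wrapper macro's #cmd parameter.
 *
 *	int ret = ioctl(vm->fd, KVM_SET_TSS_ADDR, 0xfffbd000);
 *
 *	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_TSS_ADDR, ret));
 */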

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})
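
/*
 * A sketch of the naming convention at work (illustrative, not upstream
 * code): double-underscore variants return the raw ioctl() result so that
 * callers can probe for expected failures, while the plain variants assert
 * success on the caller's behalf.
 *
 *	struct kvm_mp_state mp_state;
 *
 *	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, &mp_state);	(asserts on failure)
 *
 *	if (__vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mp_state))
 *		(caller handles the error itself)
 */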

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}
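
/*
 * Illustrative conversion flow (a sketch): flip a 2MiB GPA range to private
 * and back to shared.  SZ_2M comes from <linux/sizes.h>; the gpa value and
 * any alignment constraints are the test's responsibility, not this header's.
 *
 *	vm_mem_set_private(vm, gpa, SZ_2M);
 *	...
 *	vm_mem_set_shared(vm, gpa, SZ_2M);
 */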

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						uint64_t address,
						uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  uint64_t address,
						  uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	 /*
	  * The base size of the descriptor is defined by KVM's ABI, but the
	  * size of the name field is variable, as far as KVM's ABI is
	  * concerned. For a given instance of KVM, the name field is the same
	  * size for all stats and is provided in the overall stats header.
	  */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable. i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
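
/*
 * Example iteration over binary stats descriptors (a sketch; the fd and
 * header come from the accessors above, num_desc is part of the stats ABI):
 *
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *descs, *desc;
 *	int i, stats_fd = vm_get_stats_fd(vm);
 *
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 *	for (i = 0; i < header.num_desc; i++) {
 *		desc = get_stats_descriptor(descs, i, &header);
 *		(desc->name is header.name_size bytes, NUL-padded)
 *	}
 */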

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  uint64_t *data, size_t max_elements);

#define __get_stat(stats, stat)							\
({										\
	uint64_t data;								\
										\
	kvm_get_stat(stats, #stat, &data, 1);					\
	data;									\
})

#define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
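
/*
 * Example (a sketch): the stat is passed as a bare token and stringified by
 * __get_stat().  "halt_exits" is assumed here to be a valid x86 vCPU stat;
 * the set of stat names is defined by KVM, not by this header.
 *
 *	uint64_t halts = vcpu_get_stat(vcpu, halt_exits);
 */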

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);
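
/*
 * Illustrative guest_memfd binding (a sketch): create a guest_memfd and
 * attach it to a memslot via the KVM_MEM_GUEST_MEMFD flag.  The slot, gpa,
 * size and hva values are the test's own; only the call shapes come from
 * this header.
 *
 *	int fd = vm_create_guest_memfd(vm, size, 0);
 *
 *	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size,
 *				   hva, fd, 0);
 */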

void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
{
	uint64_t val;
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
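
/*
 * Example (a sketch): pass two arguments through to the guest's entry point.
 *
 *	static void guest_code(uint64_t a, uint64_t b) { ... }
 *
 *	vcpu_args_set(vcpu, 2, a, b);
 */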

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}
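
/*
 * Example (a sketch; NR_VCPUS is the test's own constant): create a VM with
 * NR_VCPUS runnable vCPUs, collected into a caller-provided array.
 *
 *	struct kvm_vcpu *vcpus[NR_VCPUS];
 *	struct kvm_vm *vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
 */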

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
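
/*
 * Typical single-vCPU test skeleton (a sketch; GUEST_DONE() and the rest of
 * the ucall plumbing come from ucall_common.h, not this header):
 *
 *	static void guest_code(void)
 *	{
 *		GUEST_DONE();
 *	}
 *
 *	int main(void)
 *	{
 *		struct kvm_vcpu *vcpu;
 *		struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *		vcpu_run(vcpu);
 *		kvm_vm_free(vm);
 *		return 0;
 *	}
 */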

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(uint32_t nr_vcpus);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
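
/*
 * Example (a sketch): propagate a host-initialized global into the guest's
 * copy of the data, then pull the guest's updates back out after it runs.
 *
 *	static uint64_t test_value;
 *
 *	test_value = 42;
 *	sync_global_to_guest(vm, test_value);
 *	vcpu_run(vcpu);
 *	sync_global_from_guest(vm, test_value);
 */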

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}
/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_UTIL_H */