// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#include <assert.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_MIN_PFN	2

static int vcpu_mmap_sz(void);

int open_path_or_exit(const char *path, int flags)
{
	int fd;

	fd = open(path, flags);
	__TEST_REQUIRE(fd >= 0 || errno != ENOENT, "Cannot open %s: %s", path, strerror(errno));
	TEST_ASSERT(fd >= 0, "Failed to open '%s'", path);

	return fd;
}

/*
 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
 *
 * Input Args:
 *   flags - The flags to pass when opening KVM_DEV_PATH.
 *
 * Return:
 *   The opened file descriptor of /dev/kvm.
 */
static int _open_kvm_dev_path_or_exit(int flags)
{
	return open_path_or_exit(KVM_DEV_PATH, flags);
}

int open_kvm_dev_path_or_exit(void)
{
	return _open_kvm_dev_path_or_exit(O_RDONLY);
}

static bool get_module_param_bool(const char *module_name, const char *param)
{
	const int path_size = 128;
	char path[path_size];
	char value;
	ssize_t r;
	int fd;

	r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
		     module_name, param);
	TEST_ASSERT(r < path_size,
		    "Failed to construct sysfs path in %d bytes.", path_size);

	fd = open_path_or_exit(path, O_RDONLY);

	r = read(fd, &value, 1);
	TEST_ASSERT(r == 1, "read(%s) failed", path);

	r = close(fd);
	TEST_ASSERT(!r, "close(%s) failed", path);

	if (value == 'Y')
		return true;
	else if (value == 'N')
		return false;

	TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}

bool get_kvm_param_bool(const char *param)
{
	return get_module_param_bool("kvm", param);
}

bool get_kvm_intel_param_bool(const char *param)
{
	return get_module_param_bool("kvm_intel", param);
}

bool get_kvm_amd_param_bool(const char *param)
{
	return get_module_param_bool("kvm_amd", param);
}
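
/*
 * Illustrative usage (sketch): gate a test on a boolean module parameter.
 * "enable_pmu" assumes the x86 "kvm" module parameter of that name;
 * substitute whichever parameter the test cares about:
 *
 *	TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
 */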

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap. On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
unsigned int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open_kvm_dev_path_or_exit();
	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));

	close(kvm_fd);

	return (unsigned int)ret;
}
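
/*
 * Illustrative usage (sketch): skip a test when a capability is missing, or
 * read a capability's value, e.g. the host's vCPU limit as done in
 * vm_nr_pages_required() below:
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
 *	max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
 */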

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
	else
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
	vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm)
{
	vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));

	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
}

const char *vm_guest_mode_string(uint32_t i)
{
	static const char * const strings[] = {
		[VM_MODE_P52V48_4K]	= "PA-bits:52, VA-bits:48, 4K pages",
		[VM_MODE_P52V48_16K]	= "PA-bits:52, VA-bits:48, 16K pages",
		[VM_MODE_P52V48_64K]	= "PA-bits:52, VA-bits:48, 64K pages",
		[VM_MODE_P48V48_4K]	= "PA-bits:48, VA-bits:48, 4K pages",
		[VM_MODE_P48V48_16K]	= "PA-bits:48, VA-bits:48, 16K pages",
		[VM_MODE_P48V48_64K]	= "PA-bits:48, VA-bits:48, 64K pages",
		[VM_MODE_P40V48_4K]	= "PA-bits:40, VA-bits:48, 4K pages",
		[VM_MODE_P40V48_16K]	= "PA-bits:40, VA-bits:48, 16K pages",
		[VM_MODE_P40V48_64K]	= "PA-bits:40, VA-bits:48, 64K pages",
		[VM_MODE_PXXV48_4K]	= "PA-bits:ANY, VA-bits:48, 4K pages",
		[VM_MODE_P47V64_4K]	= "PA-bits:47, VA-bits:64, 4K pages",
		[VM_MODE_P44V64_4K]	= "PA-bits:44, VA-bits:64, 4K pages",
		[VM_MODE_P36V48_4K]	= "PA-bits:36, VA-bits:48, 4K pages",
		[VM_MODE_P36V48_16K]	= "PA-bits:36, VA-bits:48, 16K pages",
		[VM_MODE_P36V48_64K]	= "PA-bits:36, VA-bits:48, 64K pages",
		[VM_MODE_P36V47_16K]	= "PA-bits:36, VA-bits:47, 16K pages",
	};
	_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
		       "Missing new mode strings?");

	TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);

	return strings[i];
}

const struct vm_guest_mode_params vm_guest_mode_params[] = {
	[VM_MODE_P52V48_4K]	= { 52, 48,  0x1000, 12 },
	[VM_MODE_P52V48_16K]	= { 52, 48,  0x4000, 14 },
	[VM_MODE_P52V48_64K]	= { 52, 48, 0x10000, 16 },
	[VM_MODE_P48V48_4K]	= { 48, 48,  0x1000, 12 },
	[VM_MODE_P48V48_16K]	= { 48, 48,  0x4000, 14 },
	[VM_MODE_P48V48_64K]	= { 48, 48, 0x10000, 16 },
	[VM_MODE_P40V48_4K]	= { 40, 48,  0x1000, 12 },
	[VM_MODE_P40V48_16K]	= { 40, 48,  0x4000, 14 },
	[VM_MODE_P40V48_64K]	= { 40, 48, 0x10000, 16 },
	[VM_MODE_PXXV48_4K]	= {  0,  0,  0x1000, 12 },
	[VM_MODE_P47V64_4K]	= { 47, 64,  0x1000, 12 },
	[VM_MODE_P44V64_4K]	= { 44, 64,  0x1000, 12 },
	[VM_MODE_P36V48_4K]	= { 36, 48,  0x1000, 12 },
	[VM_MODE_P36V48_16K]	= { 36, 48,  0x4000, 14 },
	[VM_MODE_P36V48_64K]	= { 36, 48, 0x10000, 16 },
	[VM_MODE_P36V47_16K]	= { 36, 47,  0x4000, 14 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");

/*
 * Initializes vm->vpages_valid to match the canonical VA space of the
 * architecture.
 *
 * The default implementation is valid for architectures which split the
 * range addressed by a single page table into a low and high region
 * based on the MSB of the VA. On architectures with this behavior
 * the VA region spans [0, 2^(va_bits - 1)), [-2^(va_bits - 1), -1].
 */
__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
}
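
/*
 * Worked instance (illustrative): with va_bits = 48 and 4K pages, the two
 * sparsebit ranges above cover the low half [0, 0x0000800000000000) and the
 * high half [0xffff800000000000, 2^64), i.e. the canonical halves of a
 * 48-bit VA space, each 2^47 bytes (2^35 pages).
 */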

struct kvm_vm *____vm_create(struct vm_shape shape)
{
	struct kvm_vm *vm;

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	vm->regions.gpa_tree = RB_ROOT;
	vm->regions.hva_tree = RB_ROOT;
	hash_init(vm->regions.slot_hash);

	vm->mode = shape.mode;
	vm->type = shape.type;

	vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
	vm->page_size = vm_guest_mode_params[vm->mode].page_size;
	vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P36V47_16K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless CR4.LA57 is set, which it
		 * isn't for this mode (48-bit virtual address space).
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	case VM_MODE_P47V64_4K:
		vm->pgtable_levels = 5;
		break;
	case VM_MODE_P44V64_4K:
		vm->pgtable_levels = 5;
		break;
	default:
		TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
	}

#ifdef __aarch64__
	TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	vm_vaddr_populate_bitmap(vm);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = vm_compute_max_gfn(vm);

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();

	return vm;
}

static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
				     uint32_t nr_runnable_vcpus,
				     uint64_t extra_mem_pages)
{
	uint64_t page_size = vm_guest_mode_params[mode].page_size;
	uint64_t nr_pages;

	TEST_ASSERT(nr_runnable_vcpus,
		    "Use vm_create_barebones() for VMs that _never_ have vCPUs");

	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	/*
	 * Arbitrarily allocate 512 pages (2MB when page size is 4KB) for the
	 * test code and other per-VM assets that will be loaded into memslot0.
	 */
	nr_pages = 512;

	/* Account for the per-vCPU stacks on behalf of the test. */
	nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;

	/*
	 * Account for the number of pages needed for the page tables. The
	 * maximum page table size for a memory region will be when the
	 * smallest page size is used. Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x + N/x^2 + N/x^3 + ..., which is definitely
	 * smaller than N/x * 2.
	 */
	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;

	/* Account for the number of pages needed by ucall. */
	nr_pages += ucall_nr_pages_required(page_size);

	return vm_adjust_num_guest_pages(mode, nr_pages);
}
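
/*
 * Worked instance (illustrative): with a 4K minimum page size and 8-byte
 * descriptors, x = PTES_PER_MIN_PAGE works out to 512, so mapping N pages
 * costs at most N/512 + N/512^2 + ... < 2 * N/512 page-table pages; for
 * N = 512 extra pages that is at most 2 additional pages.
 */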

struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages)
{
	uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
						 nr_extra_pages);
	struct userspace_mem_region *slot0;
	struct kvm_vm *vm;
	int i;

	pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
		 vm_guest_mode_string(shape.mode), shape.type, nr_pages);

	vm = ____vm_create(shape);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
	for (i = 0; i < NR_MEM_REGIONS; i++)
		vm->memslots[i] = 0;

	kvm_vm_elf_load(vm, program_invocation_name);

	/*
	 * TODO: Add proper defines to protect the library's memslots, and then
	 * carve out memslot1 for the ucall MMIO address. KVM treats writes to
	 * read-only memslots as MMIO, and creating a read-only memslot for the
	 * MMIO region would prevent silently clobbering the MMIO region.
	 */
	slot0 = memslot2region(vm, 0);
	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

	kvm_arch_vm_post_create(vm);

	return vm;
}

/*
 * VM Create with customized parameters
 *
 * Input Args:
 *   shape - VM shape (mode and type, e.g. VM_MODE_P52V48_4K)
 *   nr_vcpus - vCPU count
 *   extra_mem_pages - Non-slot0 physical memory total size
 *   guest_code - Guest entry point
 *   vcpus - Array that is filled with the created vCPUs
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the shape specified by shape (e.g. VM_MODE_P52V48_4K).
 * extra_mem_pages is only used to calculate the maximum page table size;
 * no real memory is allocated for non-slot0 memory in this function.
 */
struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[])
{
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");

	vm = __vm_create(shape, nr_vcpus, extra_mem_pages);

	for (i = 0; i < nr_vcpus; ++i)
		vcpus[i] = vm_vcpu_add(vm, i, guest_code);

	return vm;
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code)
{
	struct kvm_vcpu *vcpus[1];
	struct kvm_vm *vm;

	vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);

	*vcpu = vcpus[0];
	return vm;
}
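
/*
 * Illustrative usage (sketch): most tests go through the one-vCPU wrapper
 * assumed here from kvm_util.h rather than calling these directly:
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */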

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp)
{
	int ctr;
	struct userspace_mem_region *region;

	vm_open(vmp);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region);

		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
					      uint32_t vcpu_id)
{
	return __vm_vcpu_add(vm, vcpu_id);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
	kvm_vm_restart(vm);

	return vm_vcpu_recreate(vm, 0);
}

void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
{
	cpu_set_t mask;
	int r;

	CPU_ZERO(&mask);
	CPU_SET(pcpu, &mask);
	r = sched_setaffinity(0, sizeof(mask), &mask);
	TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.", pcpu);
}

static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
{
	uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);

	TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
		    "Not allowed to run on pCPU '%d', check cgroups?", pcpu);
	return pcpu;
}

void kvm_print_vcpu_pinning_help(void)
{
	const char *name = program_invocation_name;

	printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
	       "     values (target pCPU), one for each vCPU, plus an optional\n"
	       "     entry for the main application task (specified via entry\n"
	       "     <nr_vcpus + 1>). If used, entries must be provided for all\n"
	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
	       "         %s -v 3 -c 22,23,24,50\n\n"
	       "     To leave the application task unpinned, drop the final entry:\n\n"
	       "         %s -v 3 -c 22,23,24\n\n"
	       "     (default: no pinning)\n", name, name);
}

void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus)
{
	cpu_set_t allowed_mask;
	char *cpu, *cpu_list;
	char delim[2] = ",";
	int i, r;

	cpu_list = strdup(pcpus_string);
	TEST_ASSERT(cpu_list, "strdup() allocation failed.");

	r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
	TEST_ASSERT(!r, "sched_getaffinity() failed");

	cpu = strtok(cpu_list, delim);

	/* 1. Get all pcpus for vcpus. */
	for (i = 0; i < nr_vcpus; i++) {
		TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i);
		vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
		cpu = strtok(NULL, delim);
	}

	/* 2. Check if the main worker needs to be pinned. */
	if (cpu) {
		kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
		cpu = strtok(NULL, delim);
	}

	TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
	free(cpu_list);
}

/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive. If multiple overlapping regions exist, a pointer to any
 * of the regions is returned. NULL is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct rb_node *node;

	for (node = vm->regions.gpa_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, gpa_node);
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;

		if (start < existing_start)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
{

}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->run, vcpu_mmap_sz());
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));

	ret = close(vcpu->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	list_del(&vcpu->list);

	vcpu_arch_free(vcpu);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct kvm_vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region,
				   bool unlink)
{
	int ret;

	if (unlink) {
		rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
		rb_erase(&region->hva_node, &vm->regions.hva_tree);
		hash_del(&region->slot_node);
	}

	region->region.memory_size = 0;
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	sparsebit_free(&region->unused_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
	if (region->fd >= 0) {
		/* There's an extra map when using shared memory. */
		ret = munmap(region->mmap_alias, region->mmap_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		close(region->fd);
	}
	if (region->region.guest_memfd >= 0)
		close(region->region.guest_memfd);

	free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	int ctr;
	struct hlist_node *node;
	struct userspace_mem_region *region;

	if (vmp == NULL)
		return;

	/* Free cached stats metadata and close FD */
	if (vmp->stats_fd) {
		free(vmp->stats_desc);
		close(vmp->stats_fd);
	}

	/* Free userspace_mem_regions. */
	hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
		__vm_mem_region_delete(vmp, region, false);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}

int kvm_memfd_alloc(size_t size, bool hugepages)
{
	int memfd_flags = MFD_CLOEXEC;
	int fd, r;

	if (hugepages)
		memfd_flags |= MFD_HUGETLB;

	fd = memfd_create("kvm_selftest", memfd_flags);
	TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));

	r = ftruncate(fd, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));

	r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));

	return fd;
}

/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva. Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a mismatch is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison. If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found. Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}

static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), gpa_node);
		parent = *cur;
		if (region->region.guest_phys_addr <
		    cregion->region.guest_phys_addr)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->region.guest_phys_addr !=
				    cregion->region.guest_phys_addr,
				    "Duplicate GPA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->gpa_node, parent, cur);
	rb_insert_color(&region->gpa_node, gpa_tree);
}

static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), hva_node);
		parent = *cur;
		if (region->host_mem < cregion->host_mem)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->host_mem !=
				    cregion->host_mem,
				    "Duplicate HVA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->hva_node, parent, cur);
	rb_insert_color(&region->hva_node, hva_tree);
}

int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
	};

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva)
{
	int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
		    errno, strerror(errno));
}

int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset)
{
	struct kvm_userspace_memory_region2 region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
		.guest_memfd = guest_memfd,
		.guest_memfd_offset = guest_memfd_offset,
	};

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
}

void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset)
{
	int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
					       guest_memfd, guest_memfd_offset);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed, errno = %d (%s)",
		    errno, strerror(errno));
}
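
/*
 * Illustrative usage (sketch): register a guest_memfd-backed slot. The slot
 * number, GPA, and size are made up, and hva is assumed to come from an
 * mmap() done elsewhere:
 *
 *	int gmem = vm_create_guest_memfd(vm, size, 0);
 *
 *	vm_set_user_memory_region2(vm, 10, KVM_MEM_GUEST_MEMFD,
 *				   0x10000000, size, hva, gmem, 0);
 */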

/* FIXME: This thing needs to be ripped apart and rewritten. */
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset)
{
	int ret;
	struct userspace_mem_region *region;
	size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
	size_t mem_size = npages * vm->page_size;
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		    "Number of guest pages is not compatible with the host. "
		    "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		    "address not on a page boundary.\n"
		    "  guest_paddr: 0x%lx vm->page_size: 0x%x",
		    guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		    <= vm->max_gfn, "Physical range beyond maximum "
		    "supported physical address,\n"
		    "  guest_paddr: 0x%lx npages: 0x%lx\n"
		    "  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			  "exists\n"
			  "  requested guest_paddr: 0x%lx npages: 0x%lx "
			  "page_size: 0x%x\n"
			  "  existing guest_paddr: 0x%lx size: 0x%lx",
			  guest_paddr, npages, vm->page_size,
			  (uint64_t) region->region.guest_phys_addr,
			  (uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       slot) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			  "already exists.\n"
			  "  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			  "  existing slot: %u paddr: 0x%lx size: 0x%lx",
			  slot, guest_paddr, npages,
			  region->region.slot,
			  (uint64_t) region->region.guest_phys_addr,
			  (uint64_t) region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = mem_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	/*
	 * When using THP, mmap is not guaranteed to return a hugepage-aligned
	 * address, so we have to pad the mmap. Padding is not needed for
	 * HugeTLB because mmap will always return an address aligned to the
	 * HugeTLB page size.
	 */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(backing_src_pagesz, alignment);

	TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->fd = -1;
	if (backing_src_is_shared(src_type))
		region->fd = kvm_memfd_alloc(region->mmap_size,
					     src_type == VM_MEM_SRC_SHARED_HUGETLB);

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  vm_mem_backing_src_alias(src_type)->flag,
				  region->fd, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
		    "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
		    region->mmap_start, backing_src_pagesz);

	/* Align host address */
	region->host_mem = align_ptr_up(region->mmap_start, alignment);

	/* As needed perform madvise */
	if ((src_type == VM_MEM_SRC_ANONYMOUS ||
	     src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
		ret = madvise(region->host_mem, mem_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
			    region->host_mem, mem_size,
			    vm_mem_backing_src_alias(src_type)->name);
	}

	region->backing_src_type = src_type;

	if (flags & KVM_MEM_GUEST_MEMFD) {
		if (guest_memfd < 0) {
			uint32_t guest_memfd_flags = 0;
			TEST_ASSERT(!guest_memfd_offset,
				    "Offset must be zero when creating new guest_memfd");
			guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
		} else {
			/*
			 * Install a unique fd for each memslot so that the fd
			 * can be closed when the region is deleted without
			 * needing to track if the fd is owned by the framework
			 * or by the caller.
			 */
			guest_memfd = dup(guest_memfd);
			TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
		}

		region->region.guest_memfd = guest_memfd;
		region->region.guest_memfd_offset = guest_memfd_offset;
	} else {
		region->region.guest_memfd = -1;
	}

	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
		    "  rc: %i errno: %i\n"
		    "  slot: %u flags: 0x%x\n"
		    "  guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d",
		    ret, errno, slot, flags,
		    guest_paddr, (uint64_t) region->region.memory_size,
		    region->region.guest_memfd);

	/* Add to quick lookup data structures */
	vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
	vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
	hash_add(vm->regions.slot_hash, &region->slot_node, slot);

	/* If shared memory, create an alias. */
	if (region->fd >= 0) {
		region->mmap_alias = mmap(NULL, region->mmap_size,
					  PROT_READ | PROT_WRITE,
					  vm_mem_backing_src_alias(src_type)->flag,
					  region->fd, 0);
		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
			    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

		/* Align host alias address */
		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
	}
}

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t guest_paddr, uint32_t slot,
				 uint64_t npages, uint32_t flags)
{
	vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0);
}

/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describes the memory region
 *   using the KVM memory slot ID given by memslot. TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       memslot)
		if (region->region.slot == memslot)
			return region;

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to update
 *   flags - Flags to be set on the memory region
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
		    "  rc: %i errno: %i slot: %u flags: 0x%x",
		    ret, errno, slot, flags);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
			    bool punch_hole)
{
	const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
	struct userspace_mem_region *region;
	uint64_t end = base + size;
	uint64_t gpa, len;
	off_t fd_offset;
	int ret;

	for (gpa = base; gpa < end; gpa += len) {
		uint64_t offset;

		region = userspace_mem_region_find(vm, gpa, gpa);
		TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
			    "Private memory region not found for GPA 0x%lx", gpa);

		offset = gpa - region->region.guest_phys_addr;
		fd_offset = region->region.guest_memfd_offset + offset;
		len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);

		ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
		TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
			    punch_hole ? "punch hole" : "allocate", gpa, len,
			    region->region.guest_memfd, mode, fd_offset);
	}
}
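
/*
 * Illustrative usage (sketch): discard the guest_memfd backing for the first
 * 1MB of a private region at a made-up GPA by punching a hole (SZ_1M assumes
 * <linux/sizes.h>):
 *
 *	vm_guest_mem_fallocate(vm, 0x10000000, SZ_1M, true);
 */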

/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open_kvm_dev_path_or_exit();

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));

	close(dev_fd);

	return ret;
}

static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpu_id)
			return true;
	}

	return false;
}

/*
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
 * No additional vCPU setup is done. Returns the vCPU.
 */
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");

	vcpu->vm = vm;
	vcpu->id = vcpu_id;
	vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
	TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
		    "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		    vcpu_mmap_sz(), sizeof(*vcpu->run));
	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->run != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);

	return vcpu;
}

/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes. TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes. A
 * TEST_ASSERT failure occurs for invalid input or if no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
			       vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
				  pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
						     pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index?
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
						       pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
					  pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
					 pgidx_start, pages),
		    "Unexpected, invalid virtual page index range,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages),
		    "Unexpected, pages already mapped,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm);
	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
					      KVM_UTIL_MIN_PFN * vm->page_size,
					      vm->memslots[type]);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {

		virt_pg_map(vm, vaddr, paddr);

		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
	}

	return vaddr_start;
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm. The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min. Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page. The allocated physical space comes from the TEST_DATA memory region.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
	return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
}

/*
 * VM Virtual Address Allocate Pages
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least N system pages worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}

vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
{
	return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
}

/*
 * VM Virtual Address Allocate Page
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least one system page worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
{
	return vm_vaddr_alloc_pages(vm, 1);
}

/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr);
		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);

		vaddr += page_size;
		paddr += page_size;
	}
}
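
/*
 * Illustrative usage (sketch): identity-map four guest pages so that
 * guest-virtual 0x10000000 hits guest-physical 0x10000000. The addresses are
 * made up and assume the GPA range belongs to an existing memslot:
 *
 *	virt_map(vm, 0x10000000, 0x10000000, 4);
 */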

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm. When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region) {
		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
		return NULL;
	}

	return (void *)((uintptr_t)region->host_mem
		+ (gpa - region->region.guest_phys_addr));
}
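
/*
 * Illustrative usage (sketch): poke a value into guest memory from the host
 * side, assuming gpa falls inside a registered region:
 *
 *	uint64_t *hva = addr_gpa2hva(vm, gpa);
 *
 *	*hva = 0xdeadbeefull;
 */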

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm. When found, the equivalent
 * VM physical address is returned. A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct rb_node *node;

	for (node = vm->regions.hva_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, hva_node);

		if (hva >= region->host_mem) {
			if (hva <= (region->host_mem
				+ region->region.memory_size - 1))
				return (vm_paddr_t)((uintptr_t)
					region->region.guest_phys_addr
					+ (hva - (uintptr_t)region->host_mem));

			node = node->rb_right;
		} else
			node = node->rb_left;
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}

/*
 * Address VM physical to Host Virtual *alias*.
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent address within the host virtual *alias* area, or NULL
 *   (without failing the test) if the guest memory is not shared (so
 *   no alias exists).
 *
 * Create a writable, shared virtual=>physical alias for the specific GPA.
 * The primary use case is to allow the host selftest to manipulate guest
 * memory without mapping said memory in the guest's address space. And, for
 * userfaultfd-based demand paging, to do so without triggering userfaults.
 */
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;
	uintptr_t offset;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region)
		return NULL;

	if (!region->host_alias)
		return NULL;

	offset = gpa - region->region.guest_phys_addr;
	return (void *) ((uintptr_t) region->host_alias + offset);
}

/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
	vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);

	vm->has_irqchip = true;
}

int _vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	do {
		rc = __vcpu_run(vcpu);
	} while (rc == -1 && errno == EINTR);

	assert_on_unhandled_exception(vcpu);

	return rc;
}

/*
 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
 * Assert if KVM returns an error (other than -EINTR).
 */
void vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret = _vcpu_run(vcpu);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
}
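
/*
 * Illustrative usage (sketch): a typical test loop runs the vCPU and then
 * dispatches on the guest's ucall. The ucall helpers are assumed to come
 * from the selftests ucall headers:
 *
 *	struct ucall uc;
 *
 *	vcpu_run(vcpu);
 *	switch (get_ucall(vcpu, &uc)) {
 *	case UCALL_SYNC:
 *		break;
 *	case UCALL_ABORT:
 *		REPORT_GUEST_ASSERT(uc);
 *	case UCALL_DONE:
 *		return;
 *	}
 */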

void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->run->immediate_exit = 1;
	ret = __vcpu_run(vcpu);
	vcpu->run->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

/*
 * Get the list of guest registers which are supported for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer,
 * it is the caller's responsibility to free the list.
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");

	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
	return reg_list;
}
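
/*
 * Illustrative usage (sketch; struct kvm_reg_list carries its count in .n
 * and the register IDs in .reg[], and the caller owns the list):
 *
 *	struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
 *	int i;
 *
 *	for (i = 0; i < list->n; i++)
 *		pr_info("reg: 0x%llx\n", (unsigned long long)list->reg[i]);
 *	free(list);
 */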

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
	uint32_t page_size = getpagesize();
	uint32_t size = vcpu->vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	if (!vcpu->dirty_gfns) {
		void *addr;

		/*
		 * Negative tests: KVM must refuse to map the dirty ring as
		 * a private or executable mapping.
		 */
		addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		/* The one supported mapping: shared, read-write. */
		addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}
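
/*
 * Consumption sketch: walk the ring from a caller-maintained fetch index
 * ("fetch" below is hypothetical) and check the per-entry dirty flag from
 * <linux/kvm.h>. Resetting harvested entries is omitted here.
 *
 *	struct kvm_dirty_gfn *gfns = vcpu_map_dirty_ring(vcpu);
 *	struct kvm_dirty_gfn *cur = &gfns[fetch % vcpu->dirty_gfns_count];
 *
 *	if (cur->flags & KVM_DIRTY_GFN_F_DIRTY)
 *		pr_info("slot %u, gfn offset 0x%llx is dirty\n",
 *			cur->slot, cur->offset);
 */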

/*
 * Device Ioctl
 */

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	struct kvm_device_attr attribute = {
		.group = group,
		.attr = attr,
		.flags = 0,
	};

	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.flags = KVM_CREATE_DEVICE_TEST,
	};

	return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}

int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.fd = -1,
		.flags = 0,
	};
	int err;

	err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
	TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
	return err ? : create_dev.fd;
}
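
/*
 * Usage sketch: probe whether KVM supports a device type before actually
 * creating it. KVM_DEV_TYPE_ARM_VGIC_V3 is just an example type and is
 * architecture specific.
 *
 *	int dev_fd;
 *
 *	if (!__kvm_test_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3))
 *		dev_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 */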

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}

/*
 * IRQ related functions.
 */

int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	struct kvm_irq_level irq_level = {
		.irq = irq,
		.level = level,
	};

	return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	int ret = _kvm_irq_line(vm, irq, level);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}

struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
	struct kvm_irq_routing *routing;
	size_t size;

	size = sizeof(struct kvm_irq_routing);
	/* Allocate space for the max number of entries: this wastes 196 KBs. */
	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
	routing = calloc(1, size);
	assert(routing);

	return routing;
}

void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin)
{
	int i;

	assert(routing);
	assert(routing->nr < KVM_MAX_IRQ_ROUTES);

	i = routing->nr;
	routing->entries[i].gsi = gsi;
	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
	routing->entries[i].flags = 0;
	routing->entries[i].u.irqchip.irqchip = 0;
	routing->entries[i].u.irqchip.pin = pin;
	routing->nr++;
}

int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	assert(routing);
	ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
	free(routing);

	return ret;
}

void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	ret = _kvm_gsi_routing_write(vm, routing);
	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
}
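
/*
 * Usage sketch: route GSI 32 to pin 5 of the in-kernel irqchip, then hand
 * the table to KVM. Note that kvm_gsi_routing_write() frees the table, so
 * it must not be touched afterwards.
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *	kvm_gsi_routing_irqchip_add(routing, 32, 5);
 *	kvm_gsi_routing_write(vm, routing);
 */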

/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int ctr;
	struct userspace_mem_region *region;
	struct kvm_vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "", vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");

	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vcpu, indent + 2);
}

#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	KVM_EXIT_STRING(UNKNOWN),
	KVM_EXIT_STRING(EXCEPTION),
	KVM_EXIT_STRING(IO),
	KVM_EXIT_STRING(HYPERCALL),
	KVM_EXIT_STRING(DEBUG),
	KVM_EXIT_STRING(HLT),
	KVM_EXIT_STRING(MMIO),
	KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
	KVM_EXIT_STRING(SHUTDOWN),
	KVM_EXIT_STRING(FAIL_ENTRY),
	KVM_EXIT_STRING(INTR),
	KVM_EXIT_STRING(SET_TPR),
	KVM_EXIT_STRING(TPR_ACCESS),
	KVM_EXIT_STRING(S390_SIEIC),
	KVM_EXIT_STRING(S390_RESET),
	KVM_EXIT_STRING(DCR),
	KVM_EXIT_STRING(NMI),
	KVM_EXIT_STRING(INTERNAL_ERROR),
	KVM_EXIT_STRING(OSI),
	KVM_EXIT_STRING(PAPR_HCALL),
	KVM_EXIT_STRING(S390_UCONTROL),
	KVM_EXIT_STRING(WATCHDOG),
	KVM_EXIT_STRING(S390_TSCH),
	KVM_EXIT_STRING(EPR),
	KVM_EXIT_STRING(SYSTEM_EVENT),
	KVM_EXIT_STRING(S390_STSI),
	KVM_EXIT_STRING(IOAPIC_EOI),
	KVM_EXIT_STRING(HYPERV),
	KVM_EXIT_STRING(ARM_NISV),
	KVM_EXIT_STRING(X86_RDMSR),
	KVM_EXIT_STRING(X86_WRMSR),
	KVM_EXIT_STRING(DIRTY_RING_FULL),
	KVM_EXIT_STRING(AP_RESET_HOLD),
	KVM_EXIT_STRING(X86_BUS_LOCK),
	KVM_EXIT_STRING(XEN),
	KVM_EXIT_STRING(RISCV_SBI),
	KVM_EXIT_STRING(RISCV_CSR),
	KVM_EXIT_STRING(NOTIFY),
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason. If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}

/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min. If found, the pages are marked as in use
 * and their base address is returned. A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		    "not divisible by page size.\n"
		    "  paddr_min: 0x%lx page_size: 0x%x",
		    paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	/*
	 * Search for "num" consecutive free pages. On hitting an in-use
	 * page, restart the run at the next free page; sparsebit_next_set()
	 * returns 0 when no free pages remain.
	 */
	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}
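
/*
 * Example (sketch): allocate four contiguous guest-physical pages at or
 * above 1 MiB from memslot 0.
 *
 *	vm_paddr_t base = vm_phy_pages_alloc(vm, 4, 0x100000, 0);
 */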

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}

vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
{
	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				 vm->memslots[MEM_REGION_PT]);
}

/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}

unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
{
	return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/*
	 * Compute the divisor only on this path, to avoid an out-of-range
	 * shift count when page_shift >= new_page_shift.
	 */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}
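
/*
 * Worked example: converting 5 pages from a 16K page_shift (14) to a 4K
 * new_page_shift (12) takes the multiply path: 5 * (1 << 2) = 20 pages.
 * Converting 5 4K pages to 16K pages divides: n = 4, giving 5 / 4 = 1
 * page, or 1 + !!(5 % 4) = 2 pages when ceil is true.
 */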

static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;

	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}

/*
 * Read binary stats descriptors
 *
 * Input Args:
 *   stats_fd - the file descriptor for the binary stats file from which to read
 *   header - the binary stats metadata header corresponding to the given FD
 *
 * Output Args: None
 *
 * Return:
 *   A pointer to a newly allocated series of stat descriptors.
 *   Caller is responsible for freeing the returned kvm_stats_desc.
 *
 * Read the stats descriptors from the binary stats interface.
 */
struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header)
{
	struct kvm_stats_desc *stats_desc;
	ssize_t desc_size, total_size, ret;

	desc_size = get_stats_descriptor_size(header);
	total_size = header->num_desc * desc_size;

	stats_desc = calloc(header->num_desc, desc_size);
	TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");

	ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
	TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");

	return stats_desc;
}
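
/*
 * Usage sketch: grab the VM's stats FD, read the header, then pull in the
 * descriptors. The caller owns, and must release, both the buffer and the FD.
 *
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *desc;
 *	int stats_fd = vm_get_stats_fd(vm);
 *
 *	read_stats_header(stats_fd, &header);
 *	desc = read_stats_descriptors(stats_fd, &header);
 *	...
 *	free(desc);
 *	close(stats_fd);
 */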

/*
 * Read stat data for a particular stat
 *
 * Input Args:
 *   stats_fd - the file descriptor for the binary stats file from which to read
 *   header - the binary stats metadata header corresponding to the given FD
 *   desc - the binary stat metadata for the particular stat to be read
 *   max_elements - the maximum number of 8-byte values to read into data
 *
 * Output Args:
 *   data - the buffer into which stat data should be read
 *
 * Read the data values of a specified stat from the binary stats interface.
 */
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements)
{
	size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
	size_t size = nr_elements * sizeof(*data);
	ssize_t ret;

	TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
	TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);

	ret = pread(stats_fd, data, size,
		    header->data_offset + desc->offset);

	TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
		    desc->name, errno, strerror(errno));
	TEST_ASSERT(ret == size,
		    "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
		    desc->name, ret, size);
}

/*
 * Read the data of the named stat
 *
 * Input Args:
 *   vm - the VM for which the stat should be read
 *   stat_name - the name of the stat to read
 *   max_elements - the maximum number of 8-byte values to read into data
 *
 * Output Args:
 *   data - the buffer into which stat data should be read
 *
 * Read the data values of a specified stat from the binary stats interface.
 */
void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements)
{
	struct kvm_stats_desc *desc;
	size_t size_desc;
	int i;

	/* Open and cache the stats FD, header and descriptors on first use. */
	if (!vm->stats_fd) {
		vm->stats_fd = vm_get_stats_fd(vm);
		read_stats_header(vm->stats_fd, &vm->stats_header);
		vm->stats_desc = read_stats_descriptors(vm->stats_fd,
							&vm->stats_header);
	}

	size_desc = get_stats_descriptor_size(&vm->stats_header);

	for (i = 0; i < vm->stats_header.num_desc; ++i) {
		desc = (void *)vm->stats_desc + (i * size_desc);

		if (strcmp(desc->name, stat_name))
			continue;

		read_stat_data(vm->stats_fd, &vm->stats_header, desc,
			       data, max_elements);

		break;
	}
}
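
/*
 * Usage sketch: read a single value of a named stat. The stat name below is
 * only an example; which stats exist depends on the kernel and architecture.
 *
 *	uint64_t val;
 *
 *	__vm_get_stat(vm, "remote_tlb_flush", &val, 1);
 */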

__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
}

__weak void kvm_selftest_arch_init(void)
{
}

void __attribute__((constructor)) kvm_selftest_init(void)
{
	/* Tell stdout not to buffer its content. */
	setbuf(stdout, NULL);

	kvm_selftest_arch_init();
}