// SPDX-License-Identifier: GPL-2.0
/*
 * A memslot-related performance benchmark.
 *
 * Copyright (C) 2021 Oracle and/or its affiliates.
 *
 * Basic guest setup / host vCPU thread code lifted from set_memory_region_test.
 */
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>

#include <linux/compiler.h>
#include <linux/sizes.h>

#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#include <ucall_common.h>

#define MEM_EXTRA_SIZE		SZ_64K

#define MEM_SIZE		(SZ_512M + MEM_EXTRA_SIZE)
#define MEM_GPA			SZ_256M
#define MEM_AUX_GPA		MEM_GPA
#define MEM_SYNC_GPA		MEM_AUX_GPA
#define MEM_TEST_GPA		(MEM_AUX_GPA + MEM_EXTRA_SIZE)
#define MEM_TEST_SIZE		(MEM_SIZE - MEM_EXTRA_SIZE)

/*
 * 32 MiB is the max size that gets well over 100 iterations on 509 slots.
 * Considering that each slot needs to have at least one page, up to
 * 8194 slots in use can then be tested (although with slightly
 * limited resolution).
 */
#define MEM_SIZE_MAP		(SZ_32M + MEM_EXTRA_SIZE)
#define MEM_TEST_MAP_SIZE	(MEM_SIZE_MAP - MEM_EXTRA_SIZE)

/*
 * 128 MiB is the min size that fills 32k slots with at least one page each
 * while at the same time getting 100+ iterations in such a test.
 *
 * 2 MiB chunk size, like a typical huge page.
 */
#define MEM_TEST_UNMAP_SIZE		SZ_128M
#define MEM_TEST_UNMAP_CHUNK_SIZE	SZ_2M

/*
 * For the move active test the middle of the test area is placed on
 * a memslot boundary: half lies in the memslot being moved, half in
 * other memslot(s).
 *
 * We have different numbers of memory slots, excluding the reserved
 * memory slot 0, on various architectures and configurations. The
 * memory size in this test is calculated by picking the maximal
 * last memory slot's memory size, with alignment to the largest
 * supported page size (64KB). In this way, the selected memory
 * size for this test is compatible with test_memslot_move_prepare().
 *
 * architecture   slots    memory-per-slot    memory-on-last-slot
 * --------------------------------------------------------------
 * x86-4KB        32763    16KB               160KB
 * arm64-4KB      32766    16KB               112KB
 * arm64-16KB     32766    16KB               112KB
 * arm64-64KB     8192     64KB               128KB
 */
#define MEM_TEST_MOVE_SIZE		(3 * SZ_64K)
#define MEM_TEST_MOVE_GPA_DEST		(MEM_GPA + MEM_SIZE)
static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
	      "invalid move test region size");

#define MEM_TEST_VAL_1 0x1122334455667788
#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00

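/*
 * Host-side bookkeeping for one test VM: the VM / vCPU handles, the
 * memslot layout (slot count, total pages, pages per slot), the host
 * mapping of each slot, and the GPA window where MMIO exits are
 * considered legitimate (used by the "move active area" test).
 */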
struct vm_data {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	pthread_t vcpu_thread;
	uint32_t nslots;
	uint64_t npages;
	uint64_t pages_per_slot;
	void **hva_slots;
	bool mmio_ok;
	uint64_t mmio_gpa_min;
	uint64_t mmio_gpa_max;
};

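/*
 * A page shared between the host and the guest (mapped at MEM_SYNC_GPA
 * in the guest): lock-free flags for starting, stopping and
 * synchronizing the two sides, plus parameters the guest needs.
 */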
struct sync_area {
	uint32_t    guest_page_size;
	atomic_bool start_flag;
	atomic_bool exit_flag;
	atomic_bool sync_flag;
	void *move_area_ptr;
};

/*
 * Technically, we also need the atomic bool to be address-free, which
 * is recommended, but not strictly required, by C11 for lockless
 * implementations.
 * However, in practice both GCC and Clang fulfill this requirement on
 * all KVM-supported platforms.
 */
static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");

static sem_t vcpu_ready;

static bool map_unmap_verify;
#ifdef __x86_64__
static bool disable_slot_zap_quirk;
#endif

static bool verbose;
#define pr_info_v(...)				\
	do {					\
		if (verbose)			\
			pr_info(__VA_ARGS__);	\
	} while (0)

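/*
 * MMIO exits are expected only while the "move active area" test has
 * flagged them as OK, and only for 8-byte writes inside the GPA window
 * that the moved memslot temporarily vacates.
 */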
static void check_mmio_access(struct vm_data *data, struct kvm_run *run)
{
	TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit");
	TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
	TEST_ASSERT(run->mmio.len == 8,
		    "Unexpected exit mmio size = %u", run->mmio.len);
	TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min &&
		    run->mmio.phys_addr <= data->mmio_gpa_max,
		    "Unexpected exit mmio address = 0x%llx",
		    run->mmio.phys_addr);
}

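/*
 * The vCPU thread: run the guest until it signals completion, posting
 * the ready semaphore on each sync ucall and validating any MMIO exits
 * along the way.
 */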
static void *vcpu_worker(void *__data)
{
	struct vm_data *data = __data;
	struct kvm_vcpu *vcpu = data->vcpu;
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	while (1) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == 0,
				    "Unexpected sync ucall, got %lx",
				    (ulong)uc.args[1]);
			sem_post(&vcpu_ready);
			continue;
		case UCALL_NONE:
			if (run->exit_reason == KVM_EXIT_MMIO)
				check_mmio_access(data, run);
			else
				goto done;
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	return NULL;
}

static void wait_for_vcpu(void)
{
	struct timespec ts;

	TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
		    "clock_gettime() failed: %d", errno);

	ts.tv_sec += 2;
	TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
		    "sem_timedwait() failed: %d", errno);
}

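/*
 * Translate a GPA in the test region to its host virtual address.
 * Each test memslot holds pages_per_slot guest pages, with any
 * remainder going to the last slot.  If @rempages is non-NULL, @gpa
 * must be page-aligned and the number of pages left in its slot is
 * returned there.
 */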
static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
{
	uint64_t gpage, pgoffs;
	uint32_t slot, slotoffs;
	void *base;
	uint32_t guest_page_size = data->vm->page_size;

	TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
	TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
		    "Too high gpa to translate");
	gpa -= MEM_GPA;

	gpage = gpa / guest_page_size;
	pgoffs = gpa % guest_page_size;
	slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
	slotoffs = gpage - (slot * data->pages_per_slot);

	if (rempages) {
		uint64_t slotpages;

		if (slot == data->nslots - 1)
			slotpages = data->npages - slot * data->pages_per_slot;
		else
			slotpages = data->pages_per_slot;

		TEST_ASSERT(!pgoffs,
			    "Asking for remaining pages in slot but gpa not page aligned");
		*rempages = slotpages - slotoffs;
	}

	base = data->hva_slots[slot];
	return (uint8_t *)base + slotoffs * guest_page_size + pgoffs;
}

static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
{
	uint32_t guest_page_size = data->vm->page_size;

	TEST_ASSERT(slot < data->nslots, "Too high slot number");

	return MEM_GPA + slot * data->pages_per_slot * guest_page_size;
}

static struct vm_data *alloc_vm(void)
{
	struct vm_data *data;

	data = malloc(sizeof(*data));
	TEST_ASSERT(data, "malloc(vmdata) failed");

	data->vm = NULL;
	data->vcpu = NULL;
	data->hva_slots = NULL;

	return data;
}

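/*
 * A slot layout is usable only if both the per-slot size and the
 * remainder added to the last slot cover a whole number of host pages;
 * otherwise host-side page operations (e.g. madvise()) could not be
 * applied to a slot in isolation.
 */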
static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size,
			     uint64_t pages_per_slot, uint64_t rempages)
{
	if (!pages_per_slot)
		return false;

	if ((pages_per_slot * guest_page_size) % host_page_size)
		return false;

	if ((rempages * guest_page_size) % host_page_size)
		return false;

	return true;
}

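/*
 * Find the largest slot count (including reserved slot 0) for which
 * the test memory still splits into host-page-aligned slots, or 0 if
 * there is none.
 */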
static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
{
	uint32_t guest_page_size = data->vm->page_size;
	uint64_t mempages, pages_per_slot, rempages;
	uint64_t slots;

	mempages = data->npages;
	slots = data->nslots;
	while (--slots > 1) {
		pages_per_slot = mempages / slots;
		if (!pages_per_slot)
			continue;

		rempages = mempages % pages_per_slot;
		if (check_slot_pages(host_page_size, guest_page_size,
				     pages_per_slot, rempages))
			return slots + 1; /* slot 0 is reserved */
	}

	return 0;
}

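/*
 * Create the test VM: split @mem_size across @nslots memslots (any
 * remainder goes to the last slot) while timing how long adding the
 * slots takes, back and zero the memory, identity-map it in the guest
 * and initialize the shared sync area.  Returns false, with *maxslots
 * set to the largest workable slot count (or 0), if the requested
 * count does not yield host-page-aligned slots.
 */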
static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
		       void *guest_code, uint64_t mem_size,
		       struct timespec *slot_runtime)
{
	uint64_t mempages, rempages;
	uint64_t guest_addr;
	uint32_t slot, host_page_size, guest_page_size;
	struct timespec tstart;
	struct sync_area *sync;

	host_page_size = getpagesize();
	guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
	mempages = mem_size / guest_page_size;

	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
	TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size");

	data->npages = mempages;
	TEST_ASSERT(data->npages > 1, "Can't test without any memory");
	data->nslots = nslots;
	data->pages_per_slot = data->npages / data->nslots;
	rempages = data->npages % data->nslots;
	if (!check_slot_pages(host_page_size, guest_page_size,
			      data->pages_per_slot, rempages)) {
		*maxslots = get_max_slots(data, host_page_size);
		return false;
	}

	data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
	TEST_ASSERT(data->hva_slots, "malloc() fail");

	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
		  data->nslots, data->pages_per_slot, rempages);

	clock_gettime(CLOCK_MONOTONIC, &tstart);
	for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
		uint64_t npages;

		npages = data->pages_per_slot;
		if (slot == data->nslots)
			npages += rempages;

		vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
					    guest_addr, slot, npages, 0);
		guest_addr += npages * guest_page_size;
	}
	*slot_runtime = timespec_elapsed(tstart);

	for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
		uint64_t npages;
		uint64_t gpa;

		npages = data->pages_per_slot;
		if (slot == data->nslots)
			npages += rempages;

		gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
		TEST_ASSERT(gpa == guest_addr,
			    "vm_phy_pages_alloc() failed");

		data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
		memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);

		guest_addr += npages * guest_page_size;
	}

	virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages);

	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
	sync->guest_page_size = data->vm->page_size;
	atomic_init(&sync->start_flag, false);
	atomic_init(&sync->exit_flag, false);
	atomic_init(&sync->sync_flag, false);

	data->mmio_ok = false;

	return true;
}

static void launch_vm(struct vm_data *data)
{
	pr_info_v("Launching the test VM\n");

	pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);

	/* Ensure the guest thread is spun up. */
	wait_for_vcpu();
}

static void free_vm(struct vm_data *data)
{
	kvm_vm_free(data->vm);
	free(data->hva_slots);
	free(data);
}

static void wait_guest_exit(struct vm_data *data)
{
	pthread_join(data->vcpu_thread, NULL);
}

static void let_guest_run(struct sync_area *sync)
{
	atomic_store_explicit(&sync->start_flag, true, memory_order_release);
}

static void guest_spin_until_start(void)
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;

	while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
		;
}

static void make_guest_exit(struct sync_area *sync)
{
	atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
}

static bool _guest_should_exit(void)
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;

	return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
}

#define guest_should_exit() unlikely(_guest_should_exit())

/*
 * noinline so we can easily see how much time the host spends waiting
 * for the guest.
 * For the same reason use alarm() instead of polling clock_gettime()
 * to implement a wait timeout.
 */
static noinline void host_perform_sync(struct sync_area *sync)
{
	alarm(10);

	atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
		;

	alarm(0);
}

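/*
 * Guest-side counterpart of host_perform_sync(): spin until the host
 * raises sync_flag, then atomically consume it.  Returns false if the
 * host requested an exit instead.
 */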
static bool guest_perform_sync(void)
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
	bool expected;

	do {
		if (guest_should_exit())
			return false;

		expected = true;
	} while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
							&expected, false,
							memory_order_acq_rel,
							memory_order_relaxed));

	return true;
}

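/*
 * Guest code for both move tests: keep writing to the test area chosen
 * by test_memslot_move_prepare() (straddling the moved memslot's start
 * for the active variant, just below it for the inactive one) while
 * the host moves that memslot back and forth.
 */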
static void guest_code_test_memslot_move(void)
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
	uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);

	GUEST_SYNC(0);

	guest_spin_until_start();

	while (!guest_should_exit()) {
		uintptr_t ptr;

		for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
		     ptr += page_size)
			*(uint64_t *)ptr = MEM_TEST_VAL_1;

		/*
		 * No host sync here since the MMIO exits are so expensive
		 * that the host would spend most of its time waiting for
		 * the guest and so instead of measuring memslot move
		 * performance we would measure the performance and
		 * likelihood of MMIO exits.
		 */
	}

	GUEST_DONE();
}

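/*
 * Guest code for the map test: write a marker to every page in one
 * half of the test area, sync with the host (which unmaps the other
 * half in the meantime), then do the same for the second half.
 */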
static void guest_code_test_memslot_map(void)
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);

	GUEST_SYNC(0);

	guest_spin_until_start();

	while (1) {
		uintptr_t ptr;

		for (ptr = MEM_TEST_GPA;
		     ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
		     ptr += page_size)
			*(uint64_t *)ptr = MEM_TEST_VAL_1;

		if (!guest_perform_sync())
			break;

		for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
		     ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE;
		     ptr += page_size)
			*(uint64_t *)ptr = MEM_TEST_VAL_2;

		if (!guest_perform_sync())
			break;
	}

	GUEST_DONE();
}

static void guest_code_test_memslot_unmap(void)
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;

	GUEST_SYNC(0);

	guest_spin_until_start();

	while (1) {
		uintptr_t ptr = MEM_TEST_GPA;

		/*
		 * We can afford to access (map) just a small number of pages
		 * per host sync as otherwise the host will spend
		 * a significant amount of its time waiting for the guest
		 * (instead of doing unmap operations), so this will
		 * effectively turn this test into a map performance test.
		 *
		 * Just access a single page to be on the safe side.
		 */
		*(uint64_t *)ptr = MEM_TEST_VAL_1;

		if (!guest_perform_sync())
			break;

		ptr += MEM_TEST_UNMAP_SIZE / 2;
		*(uint64_t *)ptr = MEM_TEST_VAL_2;

		if (!guest_perform_sync())
			break;
	}

	GUEST_DONE();
}

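/*
 * Guest code for the RW test: write MEM_TEST_VAL_1 at the start of
 * every page, sync, then read back and clear the MEM_TEST_VAL_2
 * values the host wrote at mid-page offsets, and sync again.
 */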
static void guest_code_test_memslot_rw(void)
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);

	GUEST_SYNC(0);

	guest_spin_until_start();

	while (1) {
		uintptr_t ptr;

		for (ptr = MEM_TEST_GPA;
		     ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size)
			*(uint64_t *)ptr = MEM_TEST_VAL_1;

		if (!guest_perform_sync())
			break;

		for (ptr = MEM_TEST_GPA + page_size / 2;
		     ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) {
			uint64_t val = *(uint64_t *)ptr;

			GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2);
			*(uint64_t *)ptr = 0;
		}

		if (!guest_perform_sync())
			break;
	}

	GUEST_DONE();
}

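/*
 * Place the guest test area relative to the last memslot (the one the
 * loop below will move): for the active variant it straddles the
 * slot's start so half the writes hit the moved slot, and MMIO exits
 * into the vacated GPA window are whitelisted; for the inactive
 * variant it lies entirely below the moved slot.
 */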
static bool test_memslot_move_prepare(struct vm_data *data,
				      struct sync_area *sync,
				      uint64_t *maxslots, bool isactive)
{
	uint32_t guest_page_size = data->vm->page_size;
	uint64_t movesrcgpa, movetestgpa;

#ifdef __x86_64__
	if (disable_slot_zap_quirk)
		vm_enable_cap(data->vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
#endif

	movesrcgpa = vm_slot2gpa(data, data->nslots - 1);

	if (isactive) {
		uint64_t lastpages;

		vm_gpa2hva(data, movesrcgpa, &lastpages);
		if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) {
			*maxslots = 0;
			return false;
		}
	}

	movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1));
	sync->move_area_ptr = (void *)movetestgpa;

	if (isactive) {
		data->mmio_ok = true;
		data->mmio_gpa_min = movesrcgpa;
		data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
	}

	return true;
}

static bool test_memslot_move_prepare_active(struct vm_data *data,
					     struct sync_area *sync,
					     uint64_t *maxslots)
{
	return test_memslot_move_prepare(data, sync, maxslots, true);
}

static bool test_memslot_move_prepare_inactive(struct vm_data *data,
					       struct sync_area *sync,
					       uint64_t *maxslots)
{
	return test_memslot_move_prepare(data, sync, maxslots, false);
}

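/*
 * One benchmark iteration: move the last memslot (id nslots - 1 + 1,
 * since slot 0 is reserved) out to MEM_TEST_MOVE_GPA_DEST and back to
 * its original GPA.
 */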
static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
{
	uint64_t movesrcgpa;

	movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
	vm_mem_region_move(data->vm, data->nslots - 1 + 1,
			   MEM_TEST_MOVE_GPA_DEST);
	vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
}

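/*
 * Drop the host backing of @count guest pages starting at page offset
 * @offsp in the test area, walking memslot by memslot and using
 * madvise(MADV_DONTNEED) so the guest faults the pages back in on its
 * next access.
 */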
static void test_memslot_do_unmap(struct vm_data *data,
				  uint64_t offsp, uint64_t count)
{
	uint64_t gpa, ctr;
	uint32_t guest_page_size = data->vm->page_size;

	for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) {
		uint64_t npages;
		void *hva;
		int ret;

		hva = vm_gpa2hva(data, gpa, &npages);
		TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
		npages = min(npages, count - ctr);
		ret = madvise(hva, npages * guest_page_size, MADV_DONTNEED);
		TEST_ASSERT(!ret,
			    "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
			    hva, gpa);
		ctr += npages;
		gpa += npages * guest_page_size;
	}
	TEST_ASSERT(ctr == count,
		    "madvise(MADV_DONTNEED) should exactly cover all of the requested area");
}

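/*
 * With -d (extra debug checks), verify that the guest-written marker
 * at page offset @offsp reads back as @valexp, then clear it.
 */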
static void test_memslot_map_unmap_check(struct vm_data *data,
					 uint64_t offsp, uint64_t valexp)
{
	uint64_t gpa;
	uint64_t *val;
	uint32_t guest_page_size = data->vm->page_size;

	if (!map_unmap_verify)
		return;

	gpa = MEM_TEST_GPA + offsp * guest_page_size;
	val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
	TEST_ASSERT(*val == valexp,
		    "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
		    *val, valexp, gpa);
	*val = 0;
}

static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
{
	uint32_t guest_page_size = data->vm->page_size;
	uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size;

	/*
	 * Unmap the second half of the test area while guest writes to (maps)
	 * the first half.
	 */
	test_memslot_do_unmap(data, guest_pages / 2, guest_pages / 2);

	/*
	 * Wait for the guest to finish writing the first half of the test
	 * area, verify the written value on the first and the last page of
	 * this area and then unmap it.
	 * Meanwhile, the guest is writing to (mapping) the second half of
	 * the test area.
	 */
	host_perform_sync(sync);
	test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
	test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1);
	test_memslot_do_unmap(data, 0, guest_pages / 2);

	/*
	 * Wait for the guest to finish writing the second half of the test
	 * area and verify the written value on the first and the last page
	 * of this area.
	 * The area will be unmapped at the beginning of the next loop
	 * iteration.
	 * Meanwhile, the guest is writing to (mapping) the first half of
	 * the test area.
	 */
	host_perform_sync(sync);
	test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
	test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2);
}

static void test_memslot_unmap_loop_common(struct vm_data *data,
					   struct sync_area *sync,
					   uint64_t chunk)
{
	uint32_t guest_page_size = data->vm->page_size;
	uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size;
	uint64_t ctr;

	/*
	 * Wait for the guest to finish mapping page(s) in the first half
	 * of the test area, verify the written value and then perform unmap
	 * of this area.
	 * Meanwhile, the guest is writing to (mapping) page(s) in the second
	 * half of the test area.
	 */
	host_perform_sync(sync);
	test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
	for (ctr = 0; ctr < guest_pages / 2; ctr += chunk)
		test_memslot_do_unmap(data, ctr, chunk);

	/* Likewise, but for the opposite host / guest areas */
	host_perform_sync(sync);
	test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
	for (ctr = guest_pages / 2; ctr < guest_pages; ctr += chunk)
		test_memslot_do_unmap(data, ctr, chunk);
}

static void test_memslot_unmap_loop(struct vm_data *data,
				    struct sync_area *sync)
{
	uint32_t host_page_size = getpagesize();
	uint32_t guest_page_size = data->vm->page_size;
	uint64_t guest_chunk_pages = guest_page_size >= host_page_size ?
					1 : host_page_size / guest_page_size;

	test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
}

static void test_memslot_unmap_loop_chunked(struct vm_data *data,
					    struct sync_area *sync)
{
	uint32_t guest_page_size = data->vm->page_size;
	uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size;

	test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
}

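/*
 * Host side of the RW test: write MEM_TEST_VAL_2 at the mid-page
 * offset of every test page, sync, then verify and clear the
 * MEM_TEST_VAL_1 markers the guest wrote at the page starts, and
 * sync again.
 */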
static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
{
	uint64_t gptr;
	uint32_t guest_page_size = data->vm->page_size;

	for (gptr = MEM_TEST_GPA + guest_page_size / 2;
	     gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size)
		*(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;

	host_perform_sync(sync);

	for (gptr = MEM_TEST_GPA;
	     gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) {
		uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
		uint64_t val = *vptr;

		TEST_ASSERT(val == MEM_TEST_VAL_1,
			    "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")",
			    val, gptr);
		*vptr = 0;
	}

	host_perform_sync(sync);
}

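/*
 * One benchmark variant: its guest code, host-side loop body and
 * optional prepare hook.  A zero mem_size means the default MEM_SIZE
 * (see test_execute()).
 */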
struct test_data {
	const char *name;
	uint64_t mem_size;
	void (*guest_code)(void);
	bool (*prepare)(struct vm_data *data, struct sync_area *sync,
			uint64_t *maxslots);
	void (*loop)(struct vm_data *data, struct sync_area *sync);
};

static bool test_execute(int nslots, uint64_t *maxslots,
			 unsigned int maxtime,
			 const struct test_data *tdata,
			 uint64_t *nloops,
			 struct timespec *slot_runtime,
			 struct timespec *guest_runtime)
{
	uint64_t mem_size = tdata->mem_size ? : MEM_SIZE;
	struct vm_data *data;
	struct sync_area *sync;
	struct timespec tstart;
	bool ret = true;

	data = alloc_vm();
	if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
			mem_size, slot_runtime)) {
		ret = false;
		goto exit_free;
	}

	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
	if (tdata->prepare &&
	    !tdata->prepare(data, sync, maxslots)) {
		ret = false;
		goto exit_free;
	}

	launch_vm(data);

	clock_gettime(CLOCK_MONOTONIC, &tstart);
	let_guest_run(sync);

	while (1) {
		*guest_runtime = timespec_elapsed(tstart);
		if (guest_runtime->tv_sec >= maxtime)
			break;

		tdata->loop(data, sync);

		(*nloops)++;
	}

	make_guest_exit(sync);
	wait_guest_exit(data);

exit_free:
	free_vm(data);

	return ret;
}

static const struct test_data tests[] = {
	{
		.name = "map",
		.mem_size = MEM_SIZE_MAP,
		.guest_code = guest_code_test_memslot_map,
		.loop = test_memslot_map_loop,
	},
	{
		.name = "unmap",
		.mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
		.guest_code = guest_code_test_memslot_unmap,
		.loop = test_memslot_unmap_loop,
	},
	{
		.name = "unmap chunked",
		.mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
		.guest_code = guest_code_test_memslot_unmap,
		.loop = test_memslot_unmap_loop_chunked,
	},
	{
		.name = "move active area",
		.guest_code = guest_code_test_memslot_move,
		.prepare = test_memslot_move_prepare_active,
		.loop = test_memslot_move_loop,
	},
	{
		.name = "move inactive area",
		.guest_code = guest_code_test_memslot_move,
		.prepare = test_memslot_move_prepare_inactive,
		.loop = test_memslot_move_loop,
	},
	{
		.name = "RW",
		.guest_code = guest_code_test_memslot_rw,
		.loop = test_memslot_rw_loop,
	},
};

#define NTESTS ARRAY_SIZE(tests)

struct test_args {
	int tfirst;
	int tlast;
	int nslots;
	int seconds;
	int runs;
};

static void help(char *name, struct test_args *targs)
{
	int ctr;

	pr_info("usage: %s [-h] [-v] [-d] [-q] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n",
		name);
	pr_info(" -h: print this help screen.\n");
	pr_info(" -v: enable verbose mode (not for benchmarking).\n");
	pr_info(" -d: enable extra debug checks.\n");
	pr_info(" -q: disable memslot zap quirk during memslot move.\n");
	pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
		targs->nslots);
	pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
		targs->tfirst, NTESTS - 1);
	pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n",
		targs->tlast, NTESTS - 1);
	pr_info(" -l: specify the test length in seconds (currently: %i)\n",
		targs->seconds);
	pr_info(" -r: specify the number of runs per test (currently: %i)\n",
		targs->runs);

	pr_info("\nAvailable tests:\n");
	for (ctr = 0; ctr < NTESTS; ctr++)
		pr_info("%d: %s\n", ctr, tests[ctr].name);
}

static bool check_memory_sizes(void)
{
	uint32_t host_page_size = getpagesize();
	uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;

	if (host_page_size > SZ_64K || guest_page_size > SZ_64K) {
		pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n",
			host_page_size, guest_page_size);
		return false;
	}

	if (MEM_SIZE % guest_page_size ||
	    MEM_TEST_SIZE % guest_page_size) {
		pr_info("invalid MEM_SIZE or MEM_TEST_SIZE\n");
		return false;
	}

	if (MEM_SIZE_MAP % guest_page_size ||
	    MEM_TEST_MAP_SIZE % guest_page_size ||
	    (MEM_TEST_MAP_SIZE / guest_page_size) <= 2 ||
	    (MEM_TEST_MAP_SIZE / guest_page_size) % 2) {
		pr_info("invalid MEM_SIZE_MAP or MEM_TEST_MAP_SIZE\n");
		return false;
	}

	if (MEM_TEST_UNMAP_SIZE > MEM_TEST_SIZE ||
	    MEM_TEST_UNMAP_SIZE % guest_page_size ||
	    (MEM_TEST_UNMAP_SIZE / guest_page_size) %
	    (2 * MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size)) {
		pr_info("invalid MEM_TEST_UNMAP_SIZE or MEM_TEST_UNMAP_CHUNK_SIZE\n");
		return false;
	}

	return true;
}

static bool parse_args(int argc, char *argv[],
		       struct test_args *targs)
{
	uint32_t max_mem_slots;
	int opt;

	while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) {
		switch (opt) {
		case 'h':
		default:
			help(argv[0], targs);
			return false;
		case 'v':
			verbose = true;
			break;
		case 'd':
			map_unmap_verify = true;
			break;
#ifdef __x86_64__
		case 'q':
			disable_slot_zap_quirk = true;
			TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
				     KVM_X86_QUIRK_SLOT_ZAP_ALL);
			break;
#endif
		case 's':
			targs->nslots = atoi_paranoid(optarg);
			if (targs->nslots <= 1 && targs->nslots != -1) {
				pr_info("Slot count cap must be larger than 1 or -1 for no cap\n");
				return false;
			}
			break;
		case 'f':
			targs->tfirst = atoi_non_negative("First test", optarg);
			break;
		case 'e':
			targs->tlast = atoi_non_negative("Last test", optarg);
			if (targs->tlast >= NTESTS) {
				pr_info("Last test to run has to be non-negative and less than %zu\n",
					NTESTS);
				return false;
			}
			break;
		case 'l':
			targs->seconds = atoi_non_negative("Test length", optarg);
			break;
		case 'r':
			targs->runs = atoi_positive("Runs per test", optarg);
			break;
		}
	}

	if (optind < argc) {
		help(argv[0], targs);
		return false;
	}

	if (targs->tfirst > targs->tlast) {
		pr_info("First test to run cannot be greater than the last test to run\n");
		return false;
	}

	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	if (max_mem_slots <= 1) {
		pr_info("KVM_CAP_NR_MEMSLOTS should be greater than 1\n");
		return false;
	}

	/* Memory slot 0 is reserved */
	if (targs->nslots == -1)
		targs->nslots = max_mem_slots - 1;
	else
		targs->nslots = min_t(int, targs->nslots, max_mem_slots) - 1;

	pr_info_v("Allowed number of memory slots: %"PRIu32"\n",
		  targs->nslots + 1);

	return true;
}

struct test_result {
	struct timespec slot_runtime, guest_runtime, iter_runtime;
	int64_t slottimens, runtimens;
	uint64_t nloops;
};

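/*
 * Run one measurement of @data: execute the benchmark, report the
 * slot setup and per-iteration times, and keep track of the best
 * results seen so far.  Returns false if the test could not run with
 * the current memslot cap.
 */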
static bool test_loop(const struct test_data *data,
		      const struct test_args *targs,
		      struct test_result *rbestslottime,
		      struct test_result *rbestruntime)
{
	uint64_t maxslots;
	struct test_result result = {};

	if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
			  &result.nloops,
			  &result.slot_runtime, &result.guest_runtime)) {
		if (maxslots)
			pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n",
				maxslots);
		else
			pr_info("Memslot count may be too high for this test, try adjusting the cap\n");

		return false;
	}

	pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n",
		result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec,
		result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec);
	if (!result.nloops) {
		pr_info("No full loops done - too short test time or system too loaded?\n");
		return true;
	}

	result.iter_runtime = timespec_div(result.guest_runtime,
					   result.nloops);
	pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n",
		result.nloops,
		result.iter_runtime.tv_sec,
		result.iter_runtime.tv_nsec);
	result.slottimens = timespec_to_ns(result.slot_runtime);
	result.runtimens = timespec_to_ns(result.iter_runtime);

	/*
	 * Only rank the slot setup time for tests using the whole test memory
	 * area so they are comparable
	 */
	if (!data->mem_size &&
	    (!rbestslottime->slottimens ||
	     result.slottimens < rbestslottime->slottimens))
		*rbestslottime = result;
	if (!rbestruntime->runtimens ||
	    result.runtimens < rbestruntime->runtimens)
		*rbestruntime = result;

	return true;
}

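/*
 * Example invocation (illustrative): run only tests 0-2 with a cap of
 * 1000 memslots, 10 seconds and 5 runs per test:
 *
 *	./memslot_perf_test -f 0 -e 2 -s 1000 -l 10 -r 5
 */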
int main(int argc, char *argv[])
{
	struct test_args targs = {
		.tfirst = 0,
		.tlast = NTESTS - 1,
		.nslots = -1,
		.seconds = 5,
		.runs = 1,
	};
	struct test_result rbestslottime = {};
	int tctr;

	if (!check_memory_sizes())
		return -1;

	if (!parse_args(argc, argv, &targs))
		return -1;

	for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
		const struct test_data *data = &tests[tctr];
		unsigned int runctr;
		struct test_result rbestruntime = {};

		if (tctr > targs.tfirst)
			pr_info("\n");

		pr_info("Testing %s performance with %i runs, %d seconds each\n",
			data->name, targs.runs, targs.seconds);

		for (runctr = 0; runctr < targs.runs; runctr++)
			if (!test_loop(data, &targs,
				       &rbestslottime, &rbestruntime))
				break;

		if (rbestruntime.runtimens)
			pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n",
				rbestruntime.iter_runtime.tv_sec,
				rbestruntime.iter_runtime.tv_nsec,
				rbestruntime.nloops);
	}

	if (rbestslottime.slottimens)
		pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n",
			rbestslottime.slot_runtime.tv_sec,
			rbestslottime.slot_runtime.tv_nsec);

	return 0;
}