Lines Matching +full:sync +full:- +full:1
1 // SPDX-License-Identifier: GPL-2.0
3 * A memslot-related performance benchmark.
36 #define MEM_TEST_SIZE (MEM_SIZE - MEM_EXTRA_SIZE)
45 #define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - MEM_EXTRA_SIZE)
68 * architecture slots memory-per-slot memory-on-last-slot
69 * --------------------------------------------------------------
70 * x86-4KB 32763 16KB 160KB
71 * arm64-4KB 32766 16KB 112KB
72 * arm64-16KB 32766 16KB 112KB
73 * arm64-64KB 8192 64KB 128KB
105  * Technically, we also need the atomic bool to be address-free, which
109 * all KVM-supported platforms.
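
The excerpt above alludes to the C11 requirement that the flags shared between host and guest be lock-free (and preferably address-free), since each side reaches the sync area through its own mapping of the same memory. Below is a minimal sketch of that pattern; the struct and function names are hypothetical, not the test's exact code.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Fail the build if atomic_bool is not unconditionally lock-free. */
    _Static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");

    /* Illustrative flag shared between a host thread and guest code. */
    struct demo_sync_area {
    	atomic_bool start_flag;
    };

    /* Host side: release-store so prior writes are visible before the flag flips. */
    static void demo_let_guest_run(struct demo_sync_area *sync)
    {
    	atomic_store_explicit(&sync->start_flag, true, memory_order_release);
    }

    /* Guest side: acquire-load pairs with the release store above. */
    static void demo_spin_until_start(struct demo_sync_area *sync)
    {
    	while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
    		;
    }
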
126 TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit"); in check_mmio_access()
127 TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read"); in check_mmio_access()
128 TEST_ASSERT(run->mmio.len == 8, in check_mmio_access()
129 "Unexpected exit mmio size = %u", run->mmio.len); in check_mmio_access()
130 TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min && in check_mmio_access()
131 run->mmio.phys_addr <= data->mmio_gpa_max, in check_mmio_access()
133 run->mmio.phys_addr); in check_mmio_access()
139 struct kvm_vcpu *vcpu = data->vcpu; in vcpu_worker()
140 struct kvm_run *run = vcpu->run; in vcpu_worker()
143 while (1) { in vcpu_worker()
148 TEST_ASSERT(uc.args[1] == 0, in vcpu_worker()
149 "Unexpected sync ucall, got %lx", in vcpu_worker()
150 (ulong)uc.args[1]); in vcpu_worker()
154 if (run->exit_reason == KVM_EXIT_MMIO) in vcpu_worker()
190 uint32_t guest_page_size = data->vm->page_size; in vm_gpa2hva()
193 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, in vm_gpa2hva()
195 gpa -= MEM_GPA; in vm_gpa2hva()
199 slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); in vm_gpa2hva()
200 slotoffs = gpage - (slot * data->pages_per_slot); in vm_gpa2hva()
205 if (slot == data->nslots - 1) in vm_gpa2hva()
206 slotpages = data->npages - slot * data->pages_per_slot; in vm_gpa2hva()
208 slotpages = data->pages_per_slot; in vm_gpa2hva()
212 *rempages = slotpages - slotoffs; in vm_gpa2hva()
215 base = data->hva_slots[slot]; in vm_gpa2hva()
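
Taken together, the vm_gpa2hva() lines above split a guest physical address into a slot index and an in-slot page offset, with the last slot absorbing any remainder pages. A self-contained sketch of that arithmetic follows, using simplified, hypothetical types rather than the test's struct vm_data.

    #include <stdint.h>

    struct demo_layout {
    	uint64_t base_gpa;        /* MEM_GPA in the test */
    	uint64_t page_size;
    	uint64_t pages_per_slot;
    	uint32_t nslots;          /* data slots, i.e. not counting slot 0 */
    	uint8_t **hva_slots;      /* host mapping of each data slot */
    };

    static void *demo_gpa2hva(const struct demo_layout *l, uint64_t gpa)
    {
    	uint64_t gpage = (gpa - l->base_gpa) / l->page_size;
    	uint64_t slot = gpage / l->pages_per_slot;
    	uint64_t slotoffs;

    	/* Pages past nslots * pages_per_slot spill into the last slot. */
    	if (slot >= l->nslots)
    		slot = l->nslots - 1;
    	slotoffs = gpage - slot * l->pages_per_slot;

    	return l->hva_slots[slot] + slotoffs * l->page_size;
    }
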
221 uint32_t guest_page_size = data->vm->page_size; in vm_slot2gpa()
223 TEST_ASSERT(slot < data->nslots, "Too high slot number"); in vm_slot2gpa()
225 return MEM_GPA + slot * data->pages_per_slot * guest_page_size; in vm_slot2gpa()
235 data->vm = NULL; in alloc_vm()
236 data->vcpu = NULL; in alloc_vm()
237 data->hva_slots = NULL; in alloc_vm()
260 uint32_t guest_page_size = data->vm->page_size; in get_max_slots()
264 mempages = data->npages; in get_max_slots()
265 slots = data->nslots; in get_max_slots()
266 while (--slots > 1) { in get_max_slots()
274 return slots + 1; /* slot 0 is reserved */ in get_max_slots()
288 struct sync_area *sync; in prepare_vm() local
294 data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code); in prepare_vm()
295 TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size"); in prepare_vm()
297 data->npages = mempages; in prepare_vm()
298 TEST_ASSERT(data->npages > 1, "Can't test without any memory"); in prepare_vm()
299 data->nslots = nslots; in prepare_vm()
300 data->pages_per_slot = data->npages / data->nslots; in prepare_vm()
301 rempages = data->npages % data->nslots; in prepare_vm()
303 data->pages_per_slot, rempages)) { in prepare_vm()
308 data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); in prepare_vm()
309 TEST_ASSERT(data->hva_slots, "malloc() fail"); in prepare_vm()
311 pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", in prepare_vm()
312 data->nslots, data->pages_per_slot, rempages); in prepare_vm()
315 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { in prepare_vm()
318 npages = data->pages_per_slot; in prepare_vm()
319 if (slot == data->nslots) in prepare_vm()
322 vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS, in prepare_vm()
329 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { in prepare_vm()
333 npages = data->pages_per_slot; in prepare_vm()
334 if (slot == data->nslots) in prepare_vm()
337 gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot); in prepare_vm()
341 data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr); in prepare_vm()
342 memset(data->hva_slots[slot - 1], 0, npages * guest_page_size); in prepare_vm()
347 virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages); in prepare_vm()
349 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); in prepare_vm()
350 sync->guest_page_size = data->vm->page_size; in prepare_vm()
351 atomic_init(&sync->start_flag, false); in prepare_vm()
352 atomic_init(&sync->exit_flag, false); in prepare_vm()
353 atomic_init(&sync->sync_flag, false); in prepare_vm()
355 data->mmio_ok = false; in prepare_vm()
364 pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data); in launch_vm()
372 kvm_vm_free(data->vm); in free_vm()
373 free(data->hva_slots); in free_vm()
379 pthread_join(data->vcpu_thread, NULL); in wait_guest_exit()
382 static void let_guest_run(struct sync_area *sync) in let_guest_run() argument
384 atomic_store_explicit(&sync->start_flag, true, memory_order_release); in let_guest_run()
389 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_spin_until_start() local
391 while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire)) in guest_spin_until_start()
395 static void make_guest_exit(struct sync_area *sync) in make_guest_exit() argument
397 atomic_store_explicit(&sync->exit_flag, true, memory_order_release); in make_guest_exit()
402 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in _guest_should_exit() local
404 return atomic_load_explicit(&sync->exit_flag, memory_order_acquire); in _guest_should_exit()
415 static noinline void host_perform_sync(struct sync_area *sync) in host_perform_sync() argument
419 atomic_store_explicit(&sync->sync_flag, true, memory_order_release); in host_perform_sync()
420 while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire)) in host_perform_sync()
428 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_perform_sync() local
436 } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag, in guest_perform_sync()
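
host_perform_sync() and guest_perform_sync() rendezvous on sync_flag: the host raises the flag and spins until it drops, while the guest spins until it sees the flag raised and lowers it with a compare-and-swap. A compact sketch of that handshake is shown below; the names and exact memory orders are illustrative, not a verbatim copy of the test.

    #include <stdatomic.h>
    #include <stdbool.h>

    static void demo_host_perform_sync(atomic_bool *sync_flag)
    {
    	/* Request a sync point, then wait for the guest to consume it. */
    	atomic_store_explicit(sync_flag, true, memory_order_release);
    	while (atomic_load_explicit(sync_flag, memory_order_acquire))
    		;
    }

    static void demo_guest_perform_sync(atomic_bool *sync_flag)
    {
    	bool expected;

    	/* Spin until the host raises the flag, then atomically clear it. */
    	do {
    		expected = true;
    	} while (!atomic_compare_exchange_weak_explicit(sync_flag, &expected,
    							false,
    							memory_order_acq_rel,
    							memory_order_relaxed));
    }
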
446 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_move() local
447 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); in guest_code_test_memslot_move()
448 uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); in guest_code_test_memslot_move()
462 * No host sync here since the MMIO exits are so expensive in guest_code_test_memslot_move()
475 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_map() local
476 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); in guest_code_test_memslot_map()
482 while (1) { in guest_code_test_memslot_map()
507 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_unmap() local
513 while (1) { in guest_code_test_memslot_unmap()
518 * per host sync as otherwise the host will spend in guest_code_test_memslot_unmap()
542 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_rw() local
543 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); in guest_code_test_memslot_rw()
549 while (1) { in guest_code_test_memslot_rw()
575 struct sync_area *sync, in test_memslot_move_prepare() argument
578 uint32_t guest_page_size = data->vm->page_size; in test_memslot_move_prepare()
581 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); in test_memslot_move_prepare()
593 movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1)); in test_memslot_move_prepare()
594 sync->move_area_ptr = (void *)movetestgpa; in test_memslot_move_prepare()
597 data->mmio_ok = true; in test_memslot_move_prepare()
598 data->mmio_gpa_min = movesrcgpa; in test_memslot_move_prepare()
599 data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1; in test_memslot_move_prepare()
606 struct sync_area *sync, in test_memslot_move_prepare_active() argument
609 return test_memslot_move_prepare(data, sync, maxslots, true); in test_memslot_move_prepare_active()
613 struct sync_area *sync, in test_memslot_move_prepare_inactive() argument
616 return test_memslot_move_prepare(data, sync, maxslots, false); in test_memslot_move_prepare_inactive()
619 static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_move_loop() argument
623 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); in test_memslot_move_loop()
624 vm_mem_region_move(data->vm, data->nslots - 1 + 1, in test_memslot_move_loop()
626 vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa); in test_memslot_move_loop()
633 uint32_t guest_page_size = data->vm->page_size; in test_memslot_do_unmap()
642 npages = min(npages, count - ctr); in test_memslot_do_unmap()
659 uint32_t guest_page_size = data->vm->page_size; in test_memslot_map_unmap_check()
672 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_map_loop() argument
674 uint32_t guest_page_size = data->vm->page_size; in test_memslot_map_loop()
690 host_perform_sync(sync); in test_memslot_map_loop()
692 test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1); in test_memslot_map_loop()
705 host_perform_sync(sync); in test_memslot_map_loop()
707 test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2); in test_memslot_map_loop()
711 struct sync_area *sync, in test_memslot_unmap_loop_common() argument
714 uint32_t guest_page_size = data->vm->page_size; in test_memslot_unmap_loop_common()
725 host_perform_sync(sync); in test_memslot_unmap_loop_common()
731 host_perform_sync(sync); in test_memslot_unmap_loop_common()
738 struct sync_area *sync) in test_memslot_unmap_loop() argument
741 uint32_t guest_page_size = data->vm->page_size; in test_memslot_unmap_loop()
743 1 : host_page_size / guest_page_size; in test_memslot_unmap_loop()
745 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); in test_memslot_unmap_loop()
749 struct sync_area *sync) in test_memslot_unmap_loop_chunked() argument
751 uint32_t guest_page_size = data->vm->page_size; in test_memslot_unmap_loop_chunked()
754 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); in test_memslot_unmap_loop_chunked()
757 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_rw_loop() argument
760 uint32_t guest_page_size = data->vm->page_size; in test_memslot_rw_loop()
766 host_perform_sync(sync); in test_memslot_rw_loop()
779 host_perform_sync(sync); in test_memslot_rw_loop()
786 bool (*prepare)(struct vm_data *data, struct sync_area *sync,
788 void (*loop)(struct vm_data *data, struct sync_area *sync);
798 uint64_t mem_size = tdata->mem_size ? : MEM_SIZE; in test_execute()
800 struct sync_area *sync; in test_execute() local
805 if (!prepare_vm(data, nslots, maxslots, tdata->guest_code, in test_execute()
811 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); in test_execute()
812 if (tdata->prepare && in test_execute()
813 !tdata->prepare(data, sync, maxslots)) { in test_execute()
821 let_guest_run(sync); in test_execute()
823 while (1) { in test_execute()
825 if (guest_runtime->tv_sec >= maxtime) in test_execute()
828 tdata->loop(data, sync); in test_execute()
833 make_guest_exit(sync); in test_execute()
894 …pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r r… in help()
896 pr_info(" -h: print this help screen.\n"); in help()
897 pr_info(" -v: enable verbose mode (not for benchmarking).\n"); in help()
898 pr_info(" -d: enable extra debug checks.\n"); in help()
899 pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n", in help()
900 targs->nslots); in help()
901 pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n", in help()
902 targs->tfirst, NTESTS - 1); in help()
903 pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n", in help()
904 targs->tlast, NTESTS - 1); in help()
905 pr_info(" -l: specify the test length in seconds (currently: %i)\n", in help()
906 targs->seconds); in help()
907 pr_info(" -r: specify the number of runs per test (currently: %i)\n", in help()
908 targs->runs); in help()
957 while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) { in parse_args()
970 targs->nslots = atoi_paranoid(optarg); in parse_args()
971 if (targs->nslots <= 1 && targs->nslots != -1) { in parse_args()
972 pr_info("Slot count cap must be larger than 1 or -1 for no cap\n"); in parse_args()
977 targs->tfirst = atoi_non_negative("First test", optarg); in parse_args()
980 targs->tlast = atoi_non_negative("Last test", optarg); in parse_args()
981 if (targs->tlast >= NTESTS) { in parse_args()
982 pr_info("Last test to run has to be non-negative and less than %zu\n", in parse_args()
988 targs->seconds = atoi_non_negative("Test length", optarg); in parse_args()
991 targs->runs = atoi_positive("Runs per test", optarg); in parse_args()
1001 if (targs->tfirst > targs->tlast) { in parse_args()
1007 if (max_mem_slots <= 1) { in parse_args()
1008 pr_info("KVM_CAP_NR_MEMSLOTS should be greater than 1\n"); in parse_args()
1013 if (targs->nslots == -1) in parse_args()
1014 targs->nslots = max_mem_slots - 1; in parse_args()
1016 targs->nslots = min_t(int, targs->nslots, max_mem_slots) - 1; in parse_args()
1019 targs->nslots + 1); in parse_args()
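
The parse_args() lines above reconcile the -s cap with KVM_CAP_NR_MEMSLOTS and set one slot aside for memslot 0, which the benchmark never times. A hypothetical helper expressing the same rule:

    /*
     * Illustrative only: the test does this inline via min_t(); one slot is
     * always reserved for slot 0, which the benchmark never touches.
     */
    static int demo_effective_nslots(int requested, int max_mem_slots)
    {
    	if (requested == -1)            /* -1 means "no cap" */
    		return max_mem_slots - 1;
    	return (requested < max_mem_slots ? requested : max_mem_slots) - 1;
    }
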
1038 if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, in test_loop()
1054 pr_info("No full loops done - too short test time or system too loaded?\n"); in test_loop()
1071 if (!data->mem_size && in test_loop()
1072 (!rbestslottime->slottimens || in test_loop()
1073 result.slottimens < rbestslottime->slottimens)) in test_loop()
1075 if (!rbestruntime->runtimens || in test_loop()
1076 result.runtimens < rbestruntime->runtimens) in test_loop()
1086 .tlast = NTESTS - 1, in main()
1087 .nslots = -1, in main()
1089 .runs = 1, in main()
1095 return -1; in main()
1098 return -1; in main()
1109 data->name, targs.runs, targs.seconds); in main()