Lines Matching refs:sync

152 				"Unexpected sync ucall, got %lx",
291 struct sync_area *sync;
352 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
353 sync->guest_page_size = data->vm->page_size;
354 atomic_init(&sync->start_flag, false);
355 atomic_init(&sync->exit_flag, false);
356 atomic_init(&sync->sync_flag, false);
385 static void let_guest_run(struct sync_area *sync)
387 atomic_store_explicit(&sync->start_flag, true, memory_order_release);
392 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
394 while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
398 static void make_guest_exit(struct sync_area *sync)
400 atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
405 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
407 return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
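
Lines 385-407 are two one-way signals over that area: the host releases the guest into its test loop via start_flag and later tells it to stop via exit_flag, while the guest polls with acquire loads through the area's guest address. A minimal sketch of that pairing, reusing struct sync_area from above; the guest-side function names and the MEM_SYNC_GPA value here are assumptions.

#include <stdatomic.h>
#include <stdbool.h>

#define MEM_SYNC_GPA 0x10000000UL	/* placeholder value, not the selftest's */

/* Host: release store so the earlier sync-area writes are visible to the
 * guest once it observes the flag. */
static void let_guest_run(struct sync_area *sync)
{
	atomic_store_explicit(&sync->start_flag, true, memory_order_release);
}

static void make_guest_exit(struct sync_area *sync)
{
	atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
}

/* Guest: busy-wait on the start flag with acquire loads. */
static void guest_spin_until_start(void)	/* name assumed */
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;

	while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
		;
}

static bool guest_should_exit(void)		/* name assumed */
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;

	return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
}
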
418 static noinline void host_perform_sync(struct sync_area *sync)
422 atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
423 while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
431 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
439 } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
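
The sync_flag handshake at 418-439 is the per-iteration rendezvous: the host raises the flag and spins until it drops, and the guest consumes it with a weak compare-and-exchange so a spurious failure simply retries. A sketch of both halves; the guest-side function name, the alarm() watchdog, and the noinline definition are assumptions here.

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

#define noinline __attribute__((noinline))	/* normally provided by the harness */

/* Host: publish sync_flag, then wait for the guest to clear it.
 * The alarm() calls are assumed to act as a hung-guest watchdog. */
static noinline void host_perform_sync(struct sync_area *sync)
{
	alarm(2);

	atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
		;

	alarm(0);
}

/* Guest: wait for sync_flag and atomically clear it; return false instead
 * if the host has raised exit_flag in the meantime. */
static bool guest_perform_sync(void)		/* name assumed */
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
	bool expected;

	do {
		if (guest_should_exit())
			return false;

		expected = true;
	} while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
							&expected, false,
							memory_order_acq_rel,
							memory_order_relaxed));

	return true;
}
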
449 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
450 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
451 uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
465 * No host sync here since the MMIO exits are so expensive
478 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
479 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
510 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
521 * per host sync as otherwise the host will spend
545 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
546 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
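
Lines 449-546 are guest-side benchmark loops; each one re-reads the host-published parameters through READ_ONCE and, as the comments at 465 and 521 note, batches enough work per handshake that the expensive exits (MMIO, host sync) do not dominate. Below is an illustrative guest loop of that general shape; the function name, chunk size, and write pattern are made up, and READ_ONCE is assumed to come from the selftest headers.

#include <stdint.h>

#define GUEST_TEST_CHUNK_PAGES 512	/* illustrative batch size */

static void guest_test_write_loop(void)	/* illustrative, not from the source */
{
	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
	uintptr_t base = (uintptr_t)READ_ONCE(sync->move_area_ptr);

	for (;;) {
		uintptr_t ptr;

		/* One chunk of page writes per handshake, so the host is not
		 * flooded with handshakes relative to useful guest work. */
		for (ptr = base;
		     ptr < base + (uintptr_t)GUEST_TEST_CHUNK_PAGES * page_size;
		     ptr += page_size)
			*(volatile uint64_t *)ptr = ptr;

		if (!guest_perform_sync())
			break;	/* host raised exit_flag */
	}
}
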
578 struct sync_area *sync,
602 sync->move_area_ptr = (void *)movetestgpa;
614 struct sync_area *sync,
617 return test_memslot_move_prepare(data, sync, maxslots, true);
621 struct sync_area *sync,
624 return test_memslot_move_prepare(data, sync, maxslots, false);
627 static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
680 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
698 host_perform_sync(sync);
713 host_perform_sync(sync);
719 struct sync_area *sync,
733 host_perform_sync(sync);
739 host_perform_sync(sync);
746 struct sync_area *sync)
753 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
757 struct sync_area *sync)
762 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
765 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
774 host_perform_sync(sync);
787 host_perform_sync(sync);
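
On the host side, the test_memslot_*_loop bodies (627-787) pair each memslot operation with a host_perform_sync() call, often twice per iteration (698/713, 733/739, 774/787), so host and guest alternate over the two halves of the test area. A rough, illustrative pattern of one such loop body; host_do_memslot_op() is a placeholder, not a function in the source.

/* Illustrative loop body: operate on the half the guest is not touching,
 * then hand the baton over and do the other half. */
static void host_test_loop_once(struct vm_data *data, struct sync_area *sync)
{
	host_do_memslot_op(data, true);		/* first half (placeholder) */
	host_perform_sync(sync);		/* guest switches halves */

	host_do_memslot_op(data, false);	/* second half (placeholder) */
	host_perform_sync(sync);
}
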
794 bool (*prepare)(struct vm_data *data, struct sync_area *sync,
796 void (*loop)(struct vm_data *data, struct sync_area *sync);
808 struct sync_area *sync;
819 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
821 !tdata->prepare(data, sync, maxslots)) {
829 let_guest_run(sync);
836 tdata->loop(data, sync);
841 make_guest_exit(sync);
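
Lines 794-841 tie it together: each test supplies an optional prepare() callback and a loop() callback, both handed the sync area, and the runner maps the area via vm_gpa2hva(), runs prepare, releases the guest, drives loop() for the measurement window, then asks the guest to exit. A hedged reconstruction of that driver sequence, continuing the sketches above; names and types not visible in the matches, and the deadline handling, are assumptions.

struct memslot_test {				/* name assumed */
	const char *name;
	bool (*prepare)(struct vm_data *data, struct sync_area *sync,
			uint64_t *maxslots);
	void (*loop)(struct vm_data *data, struct sync_area *sync);
};

/* Rough shape of one test run; timing and error reporting omitted. */
static bool run_one_test(const struct memslot_test *tdata,
			 struct vm_data *data, uint64_t *maxslots)
{
	struct sync_area *sync;

	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);

	if (tdata->prepare && !tdata->prepare(data, sync, maxslots))
		return false;

	let_guest_run(sync);

	while (!measurement_window_elapsed())	/* placeholder predicate */
		tdata->loop(data, sync);

	make_guest_exit(sync);
	return true;
}
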