Lines Matching full:sync
152 "Unexpected sync ucall, got %lx", in vcpu_worker()
291 struct sync_area *sync; in prepare_vm() local
352 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); in prepare_vm()
353 sync->guest_page_size = data->vm->page_size; in prepare_vm()
354 atomic_init(&sync->start_flag, false); in prepare_vm()
355 atomic_init(&sync->exit_flag, false); in prepare_vm()
356 atomic_init(&sync->sync_flag, false); in prepare_vm()
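Taken together, the prepare_vm() matches above describe a shared control page: the host translates MEM_SYNC_GPA to a host virtual address, publishes the guest page size, and clears three atomic flags before the guest runs. A minimal sketch of that layout and setup follows; the field types and the init helper's name are assumptions, only the field names come from the matches.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Shared host/guest control block; names follow the matched lines,
 * exact types and any further fields are assumed. */
struct sync_area {
	uint32_t guest_page_size;	/* published by the host (line 353) */
	atomic_bool start_flag;		/* host -> guest: start the test loop */
	atomic_bool exit_flag;		/* host -> guest: leave the test loop */
	atomic_bool sync_flag;		/* per-iteration host/guest rendezvous */
	void *move_area_ptr;		/* area the move test touches (line 602) */
};

/* Host-side setup mirroring lines 352-356: every flag starts cleared,
 * so the guest spins until the host explicitly releases it. */
static void sync_area_init(struct sync_area *sync, uint32_t page_size)
{
	sync->guest_page_size = page_size;
	atomic_init(&sync->start_flag, false);
	atomic_init(&sync->exit_flag, false);
	atomic_init(&sync->sync_flag, false);
}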
385 static void let_guest_run(struct sync_area *sync) in let_guest_run() argument
387 atomic_store_explicit(&sync->start_flag, true, memory_order_release); in let_guest_run()
392 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_spin_until_start() local
394 while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire)) in guest_spin_until_start()
398 static void make_guest_exit(struct sync_area *sync) in make_guest_exit() argument
400 atomic_store_explicit(&sync->exit_flag, true, memory_order_release); in make_guest_exit()
405 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in _guest_should_exit() local
407 return atomic_load_explicit(&sync->exit_flag, memory_order_acquire); in _guest_should_exit()
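Lines 385-407 pair a release store on the host with an acquire load in the guest for each of the two one-shot flags, so whatever the host wrote before raising a flag is visible to the guest once the flag is observed. A sketch of that handshake, reusing struct sync_area from the sketch above and assuming the guest sees the same page at MEM_SYNC_GPA (the constant below is a placeholder, not the test's real value):

#define MEM_SYNC_GPA 0x10000000UL	/* placeholder; the real test defines its own GPA */

/* Host: release the guest into its test loop (line 387). */
static void let_guest_run(struct sync_area *sync)
{
	atomic_store_explicit(&sync->start_flag, true, memory_order_release);
}

/* Guest: busy-wait until the host raises start_flag (lines 392-394). */
static void guest_spin_until_start(void)
{
	struct sync_area *sync = (struct sync_area *)MEM_SYNC_GPA;

	while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
		;	/* the flag is only ever set once */
}

/* The exit_flag pair (lines 398-407) is symmetric: the host sets it once,
 * and the guest polls it between work items to know when to stop. */
static void make_guest_exit(struct sync_area *sync)
{
	atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
}

static bool _guest_should_exit(void)
{
	struct sync_area *sync = (struct sync_area *)MEM_SYNC_GPA;

	return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
}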
418 static noinline void host_perform_sync(struct sync_area *sync) in host_perform_sync() argument
422 atomic_store_explicit(&sync->sync_flag, true, memory_order_release); in host_perform_sync()
423 while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire)) in host_perform_sync()
431 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_perform_sync() local
439 } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag, in guest_perform_sync()
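The third flag implements a two-sided rendezvous: host_perform_sync() (lines 418-423) raises sync_flag and waits for it to drop, while the guest (lines 431-439) spins until it can atomically flip the flag from true back to false. A sketch of both sides, with the loop bodies partly assumed; note that `expected` has to be reset to true on every retry, because a failed compare-exchange overwrites it with the flag's current value.

/* Host side: raise the flag, then wait for the guest to consume it. */
static void host_perform_sync(struct sync_area *sync)
{
	atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
		;	/* guest clears the flag once it has caught up */
}

/* Guest side: consume a raised flag exactly once, or report that the host
 * asked the guest to exit instead. */
static bool guest_perform_sync(void)
{
	struct sync_area *sync = (struct sync_area *)MEM_SYNC_GPA;
	bool expected;

	do {
		if (_guest_should_exit())
			return false;

		expected = true;	/* only a raised flag may be consumed */
	} while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
							&expected, false,
							memory_order_acq_rel,
							memory_order_relaxed));

	return true;
}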
449 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_move() local
450 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); in guest_code_test_memslot_move()
451 uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); in guest_code_test_memslot_move()
465 * No host sync here since the MMIO exits are so expensive in guest_code_test_memslot_move()
478 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_map() local
479 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); in guest_code_test_memslot_map()
510 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_unmap() local
521 * per host sync as otherwise the host will spend in guest_code_test_memslot_unmap()
545 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; in guest_code_test_memslot_rw() local
546 uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); in guest_code_test_memslot_rw()
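The guest_code_test_memslot_*() matches (lines 449-546) share one shape: read the parameters the host published through the sync area with READ_ONCE, wait for start_flag, then touch memory and rendezvous once per pass until the exit flag is raised. The comments matched at lines 465 and 521 flag the exceptions, where a pass covers extra work so MMIO exits or host-side spinning do not dominate the measurement. A simplified guest loop along those lines, building on the earlier sketches; the working-set size and access pattern below are illustrative only.

#ifndef READ_ONCE
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))	/* stand-in for the selftest helper */
#endif

static void guest_code_generic(void)
{
	struct sync_area *sync = (struct sync_area *)MEM_SYNC_GPA;
	uint32_t page_size = READ_ONCE(sync->guest_page_size);
	uintptr_t base = (uintptr_t)READ_ONCE(sync->move_area_ptr);

	guest_spin_until_start();

	while (guest_perform_sync()) {
		uintptr_t ptr;

		/* touch one byte per page of the area under test */
		for (ptr = base; ptr < base + 64 * page_size; ptr += page_size)
			*(volatile uint8_t *)ptr = 0;
	}

	/* the real guest code finishes with a ucall back to the host (line 152) */
}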
578 struct sync_area *sync, in test_memslot_move_prepare() argument
602 sync->move_area_ptr = (void *)movetestgpa; in test_memslot_move_prepare()
614 struct sync_area *sync, in test_memslot_move_prepare_active() argument
617 return test_memslot_move_prepare(data, sync, maxslots, true); in test_memslot_move_prepare_active()
621 struct sync_area *sync, in test_memslot_move_prepare_inactive() argument
624 return test_memslot_move_prepare(data, sync, maxslots, false); in test_memslot_move_prepare_inactive()
627 static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_move_loop() argument
680 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_map_loop() argument
698 host_perform_sync(sync); in test_memslot_map_loop()
713 host_perform_sync(sync); in test_memslot_map_loop()
719 struct sync_area *sync, in test_memslot_unmap_loop_common() argument
733 host_perform_sync(sync); in test_memslot_unmap_loop_common()
739 host_perform_sync(sync); in test_memslot_unmap_loop_common()
746 struct sync_area *sync) in test_memslot_unmap_loop() argument
753 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); in test_memslot_unmap_loop()
757 struct sync_area *sync) in test_memslot_unmap_loop_chunked() argument
762 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); in test_memslot_unmap_loop_chunked()
765 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_rw_loop() argument
774 host_perform_sync(sync); in test_memslot_rw_loop()
787 host_perform_sync(sync); in test_memslot_rw_loop()
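On the host side, each *_loop() function calls host_perform_sync() twice per pass (lines 698/713, 733/739, 774/787), so host-side memslot operations and guest accesses advance in lockstep. A skeleton of that pattern, with the memslot operations themselves left as placeholders:

static void test_memslot_generic_loop(struct vm_data *data,
				      struct sync_area *sync)
{
	/* ... operate on the first batch of memslots via 'data' ... */

	/* pairs with a guest_perform_sync() in the guest's loop */
	host_perform_sync(sync);

	/* ... operate on the second batch of memslots ... */

	host_perform_sync(sync);
}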
794 bool (*prepare)(struct vm_data *data, struct sync_area *sync,
796 void (*loop)(struct vm_data *data, struct sync_area *sync);
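Lines 794-796 come from the per-test descriptor: each benchmark supplies an optional prepare hook and a host-side loop, both handed the shared sync area. A sketch of that descriptor; the name field and the type of the third prepare parameter are assumptions.

struct test_data {
	const char *name;	/* assumed: used when reporting results */
	bool (*prepare)(struct vm_data *data, struct sync_area *sync,
			uint64_t *maxslots);
	void (*loop)(struct vm_data *data, struct sync_area *sync);
};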
808 struct sync_area *sync; in test_execute() local
819 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); in test_execute()
821 !tdata->prepare(data, sync, maxslots)) { in test_execute()
829 let_guest_run(sync); in test_execute()
836 tdata->loop(data, sync); in test_execute()
841 make_guest_exit(sync); in test_execute()
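test_execute() (lines 808-841) ties the pieces together: translate MEM_SYNC_GPA once via the test's vm_gpa2hva() helper, run the prepare hook if the descriptor has one, release the guest, drive the host loop, then raise the exit flag. A condensed sketch of that flow; the signature is assumed, and timing, the vCPU thread, and error handling are omitted.

static bool test_execute(const struct test_data *tdata, struct vm_data *data,
			 uint64_t *maxslots)
{
	struct sync_area *sync;

	sync = (struct sync_area *)vm_gpa2hva(data, MEM_SYNC_GPA, NULL);

	if (tdata->prepare && !tdata->prepare(data, sync, maxslots))
		return false;	/* e.g. not enough memslots available */

	let_guest_run(sync);

	/* the real test repeats this until the requested runtime elapses */
	tdata->loop(data, sync);

	make_guest_exit(sync);
	return true;
}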