Lines Matching +full:guest +full:- +full:side

1 // SPDX-License-Identifier: GPL-2.0-only
34 * In the following, we will distinguish between two kinds of VMX processes -
37 * newer ones that use the guest memory directly. We will in the following
38 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
39 * new-style VMX'en.
42 * removed for readability) - see below for more details on the transitions:
44 * -------------- NEW -------------
47 * CREATED_NO_MEM <-----------------> CREATED_MEM
49 * | o-----------------------o |
52 * ATTACHED_NO_MEM <----------------> ATTACHED_MEM
54 * | o----------------------o |
57 * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
60 * -------------> gone <-------------
65 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
67 * - the create was performed by a host endpoint, in which case there is
70 * - the create was initiated by an old-style VMX, that uses
73 * above by the context ID of the creator. A host side is not allowed to
76 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
80 * the host side to attach to it.
86 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
89 * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
90 * pair, and attaches to a queue pair previously created by the host side.
92 * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
93 * already created by a guest.
95 * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
98 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
99 * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
105 * when either side of the queue pair detaches. If the guest side detaches
108 * side detaches first, the queue pair will either enter the
109 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
110 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
111 * (e.g., the host detaches while a guest is stunned).
113 * New-style VMX'en will also unmap guest memory, if the guest is
114 * quiesced, e.g., during a snapshot operation. In that case, the guest
129 bool host; /* Host or guest? */
134 } g; /* Used by the guest. */
171 #define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
172 _qpb->state == VMCIQPB_ATTACHED_MEM || \
173 _qpb->state == VMCIQPB_SHUTDOWN_MEM)
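
The NO_MEM/MEM pairing in this macro is load-bearing: vmci_qp_broker_map() and vmci_qp_broker_unmap() further down toggle the memory flavor of a state simply via entry->state++ and entry->state--. A minimal sketch of the enum ordering this relies on (the declaration itself falls outside the matched lines, so treat the exact shape as an assumption):

	enum qp_broker_state {
		VMCIQPB_NEW,
		VMCIQPB_CREATED_NO_MEM,
		VMCIQPB_CREATED_MEM,	/* VMCIQPB_CREATED_NO_MEM + 1 */
		VMCIQPB_ATTACHED_NO_MEM,
		VMCIQPB_ATTACHED_MEM,	/* VMCIQPB_ATTACHED_NO_MEM + 1 */
		VMCIQPB_SHUTDOWN_NO_MEM,
		VMCIQPB_SHUTDOWN_MEM,	/* VMCIQPB_SHUTDOWN_NO_MEM + 1 */
		VMCIQPB_GONE,
	};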
176 * In the queue pair broker, we always use the guest point of view for
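
A hedged illustration of what "guest point of view" means in practice: the broker entry's produce_q/consume_q (and sizes) are stored as the guest sees them, so a host-side endpoint must hand them out mirrored. The surrounding condition here is invented for illustration; the matched lines from qp_alloc_host_work() further below only show the two assignment branches.

	if (host_endpoint) {
		/* The guest's consume queue is the host's produce queue. */
		*produce_q = entry->consume_q;
		*consume_q = entry->produce_q;
	} else {
		*produce_q = entry->produce_q;
		*consume_q = entry->consume_q;
	}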
257 dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE, in qp_free_queue()
258 queue->kernel_if->u.g.vas[i], in qp_free_queue()
259 queue->kernel_if->u.g.pas[i]); in qp_free_queue()
277 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); in qp_alloc_queue()
280 if (size > SIZE_MAX - PAGE_SIZE) in qp_alloc_queue()
284 (SIZE_MAX - queue_size) / in qp_alloc_queue()
285 (sizeof(*queue->kernel_if->u.g.pas) + in qp_alloc_queue()
286 sizeof(*queue->kernel_if->u.g.vas))) in qp_alloc_queue()
289 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); in qp_alloc_queue()
290 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); in qp_alloc_queue()
297 queue->q_header = NULL; in qp_alloc_queue()
298 queue->saved_header = NULL; in qp_alloc_queue()
299 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); in qp_alloc_queue()
300 queue->kernel_if->mutex = NULL; in qp_alloc_queue()
301 queue->kernel_if->num_pages = num_pages; in qp_alloc_queue()
302 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1); in qp_alloc_queue()
303 queue->kernel_if->u.g.vas = in qp_alloc_queue()
304 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size); in qp_alloc_queue()
305 queue->kernel_if->host = false; in qp_alloc_queue()
308 queue->kernel_if->u.g.vas[i] = in qp_alloc_queue()
309 dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE, in qp_alloc_queue()
310 &queue->kernel_if->u.g.pas[i], in qp_alloc_queue()
312 if (!queue->kernel_if->u.g.vas[i]) { in qp_alloc_queue()
320 queue->q_header = queue->kernel_if->u.g.vas[0]; in qp_alloc_queue()
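
The assignments in qp_alloc_queue() above imply that everything except the data pages comes from a single allocation. A sketch of the assumed layout, reconstructed from the pointer arithmetic rather than quoted from the source:

	/*
	 *   +----------------------------+ <- queue
	 *   | struct vmci_queue          |
	 *   +----------------------------+ <- queue->kernel_if (queue + 1)
	 *   | struct vmci_queue_kern_if  |
	 *   +----------------------------+ <- u.g.pas (kernel_if + 1)
	 *   | dma_addr_t pas[num_pages]  |
	 *   +----------------------------+ <- u.g.vas (pas + pas_size)
	 *   | void *vas[num_pages]       |
	 *   +----------------------------+
	 *
	 * Each vas[i]/pas[i] pair then names a separate PAGE_SIZE page
	 * from dma_alloc_coherent(); vas[0] doubles as the queue header.
	 */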
328 * by traversing the offset -> page translation structure for the queue.
336 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; in qp_memcpy_to_queue_iter()
343 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); in qp_memcpy_to_queue_iter()
347 if (kernel_if->host) in qp_memcpy_to_queue_iter()
348 va = kmap_local_page(kernel_if->u.h.page[page_index]); in qp_memcpy_to_queue_iter()
350 va = kernel_if->u.g.vas[page_index + 1]; in qp_memcpy_to_queue_iter()
353 if (size - bytes_copied > PAGE_SIZE - page_offset) in qp_memcpy_to_queue_iter()
355 to_copy = PAGE_SIZE - page_offset; in qp_memcpy_to_queue_iter()
357 to_copy = size - bytes_copied; in qp_memcpy_to_queue_iter()
361 if (kernel_if->host) in qp_memcpy_to_queue_iter()
366 if (kernel_if->host) in qp_memcpy_to_queue_iter()
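
A worked example of the offset -> page translation (values invented; this assumes page_index is computed as (queue_offset + bytes_copied) / PAGE_SIZE, which the matched lines imply but do not show):

	/*
	 * With PAGE_SIZE == 4096, queue_offset == 5000, bytes_copied == 0:
	 *   page_index  = 5000 / 4096       = 1
	 *   page_offset = 5000 & (4096 - 1) = 904
	 *
	 * On the guest side the copy targets vas[page_index + 1] because
	 * vas[0] is the queue header; on the host side u.h.page already
	 * points one past the header page (see qp_host_alloc_queue()
	 * below), so page[page_index] lines up the same way.
	 */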
376 * by traversing the offset -> page translation structure for the queue.
383 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; in qp_memcpy_from_queue_iter()
390 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); in qp_memcpy_from_queue_iter()
395 if (kernel_if->host) in qp_memcpy_from_queue_iter()
396 va = kmap_local_page(kernel_if->u.h.page[page_index]); in qp_memcpy_from_queue_iter()
398 va = kernel_if->u.g.vas[page_index + 1]; in qp_memcpy_from_queue_iter()
401 if (size - bytes_copied > PAGE_SIZE - page_offset) in qp_memcpy_from_queue_iter()
403 to_copy = PAGE_SIZE - page_offset; in qp_memcpy_from_queue_iter()
405 to_copy = size - bytes_copied; in qp_memcpy_from_queue_iter()
409 if (kernel_if->host) in qp_memcpy_from_queue_iter()
414 if (kernel_if->host) in qp_memcpy_from_queue_iter()
422 * Allocates two lists of PPNs - one for the pages in the produce queue,
442 if (ppn_set->initialized) in qp_alloc_ppn_set()
461 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; in qp_alloc_ppn_set()
465 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; in qp_alloc_ppn_set()
467 ppn_set->num_produce_pages = num_produce_pages; in qp_alloc_ppn_set()
468 ppn_set->num_consume_pages = num_consume_pages; in qp_alloc_ppn_set()
469 ppn_set->produce_ppns = produce_ppns; in qp_alloc_ppn_set()
470 ppn_set->consume_ppns = consume_ppns; in qp_alloc_ppn_set()
471 ppn_set->initialized = true; in qp_alloc_ppn_set()
480 if (ppn_set->initialized) { in qp_free_ppn_set()
482 kfree(ppn_set->produce_ppns); in qp_free_ppn_set()
483 kfree(ppn_set->consume_ppns); in qp_free_ppn_set()
495 memcpy(call_buf, ppn_set->produce_ppns, in qp_populate_ppn_set()
496 ppn_set->num_produce_pages * in qp_populate_ppn_set()
497 sizeof(*ppn_set->produce_ppns)); in qp_populate_ppn_set()
499 ppn_set->num_produce_pages * in qp_populate_ppn_set()
500 sizeof(*ppn_set->produce_ppns), in qp_populate_ppn_set()
501 ppn_set->consume_ppns, in qp_populate_ppn_set()
502 ppn_set->num_consume_pages * in qp_populate_ppn_set()
503 sizeof(*ppn_set->consume_ppns)); in qp_populate_ppn_set()
508 for (i = 0; i < ppn_set->num_produce_pages; i++) in qp_populate_ppn_set()
509 ppns[i] = (u32) ppn_set->produce_ppns[i]; in qp_populate_ppn_set()
511 ppns = &ppns[ppn_set->num_produce_pages]; in qp_populate_ppn_set()
513 for (i = 0; i < ppn_set->num_consume_pages; i++) in qp_populate_ppn_set()
514 ppns[i] = (u32) ppn_set->consume_ppns[i]; in qp_populate_ppn_set()
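
A sketch of the hypercall buffer that qp_populate_ppn_set() fills in; the predicate selecting between the memcpy path and the u32-narrowing loop is not among the matched lines, so it is described generically here:

	/*
	 * call_buf layout:
	 *
	 *   [ produce PPN 0 .. produce PPN n-1 | consume PPN 0 .. PPN m-1 ]
	 *
	 * With 64-bit PPN support both arrays are memcpy'd verbatim;
	 * without it, each u64 PPN is narrowed to a u32 one element at
	 * a time, produce PPNs first, consume PPNs immediately after.
	 */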
522 * and kernel interface. This is different from the guest queue allocator,
524 * share those of the guest.
531 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); in qp_host_alloc_queue()
533 if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE)) in qp_host_alloc_queue()
536 if (num_pages > (SIZE_MAX - queue_size) / in qp_host_alloc_queue()
537 sizeof(*queue->kernel_if->u.h.page)) in qp_host_alloc_queue()
540 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); in qp_host_alloc_queue()
547 queue->q_header = NULL; in qp_host_alloc_queue()
548 queue->saved_header = NULL; in qp_host_alloc_queue()
549 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); in qp_host_alloc_queue()
550 queue->kernel_if->host = true; in qp_host_alloc_queue()
551 queue->kernel_if->mutex = NULL; in qp_host_alloc_queue()
552 queue->kernel_if->num_pages = num_pages; in qp_host_alloc_queue()
553 queue->kernel_if->u.h.header_page = in qp_host_alloc_queue()
555 queue->kernel_if->u.h.page = in qp_host_alloc_queue()
556 &queue->kernel_if->u.h.header_page[1]; in qp_host_alloc_queue()
575 * are actually acquired. Queue structure must lie on non-paged memory
582 * Only the host queue has shared state - the guest queues do not in qp_init_queue_mutex()
586 if (produce_q->kernel_if->host) { in qp_init_queue_mutex()
587 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; in qp_init_queue_mutex()
588 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; in qp_init_queue_mutex()
589 mutex_init(produce_q->kernel_if->mutex); in qp_init_queue_mutex()
599 if (produce_q->kernel_if->host) { in qp_cleanup_queue_mutex()
600 produce_q->kernel_if->mutex = NULL; in qp_cleanup_queue_mutex()
601 consume_q->kernel_if->mutex = NULL; in qp_cleanup_queue_mutex()
612 if (queue->kernel_if->host) in qp_acquire_queue_mutex()
613 mutex_lock(queue->kernel_if->mutex); in qp_acquire_queue_mutex()
623 if (queue->kernel_if->host) in qp_release_queue_mutex()
624 mutex_unlock(queue->kernel_if->mutex); in qp_release_queue_mutex()
659 produce_q->kernel_if->num_pages, in qp_host_get_user_memory()
661 produce_q->kernel_if->u.h.header_page); in qp_host_get_user_memory()
662 if (retval < (int)produce_q->kernel_if->num_pages) { in qp_host_get_user_memory()
666 qp_release_pages(produce_q->kernel_if->u.h.header_page, in qp_host_get_user_memory()
673 consume_q->kernel_if->num_pages, in qp_host_get_user_memory()
675 consume_q->kernel_if->u.h.header_page); in qp_host_get_user_memory()
676 if (retval < (int)consume_q->kernel_if->num_pages) { in qp_host_get_user_memory()
680 qp_release_pages(consume_q->kernel_if->u.h.header_page, in qp_host_get_user_memory()
682 qp_release_pages(produce_q->kernel_if->u.h.header_page, in qp_host_get_user_memory()
683 produce_q->kernel_if->num_pages, false); in qp_host_get_user_memory()
708 produce_uva = page_store->pages; in qp_host_register_user_memory()
709 consume_uva = page_store->pages + in qp_host_register_user_memory()
710 produce_q->kernel_if->num_pages * PAGE_SIZE; in qp_host_register_user_memory()
723 qp_release_pages(produce_q->kernel_if->u.h.header_page, in qp_host_unregister_user_memory()
724 produce_q->kernel_if->num_pages, true); in qp_host_unregister_user_memory()
725 memset(produce_q->kernel_if->u.h.header_page, 0, in qp_host_unregister_user_memory()
726 sizeof(*produce_q->kernel_if->u.h.header_page) * in qp_host_unregister_user_memory()
727 produce_q->kernel_if->num_pages); in qp_host_unregister_user_memory()
728 qp_release_pages(consume_q->kernel_if->u.h.header_page, in qp_host_unregister_user_memory()
729 consume_q->kernel_if->num_pages, true); in qp_host_unregister_user_memory()
730 memset(consume_q->kernel_if->u.h.header_page, 0, in qp_host_unregister_user_memory()
731 sizeof(*consume_q->kernel_if->u.h.header_page) * in qp_host_unregister_user_memory()
732 consume_q->kernel_if->num_pages); in qp_host_unregister_user_memory()
748 if (!produce_q->q_header || !consume_q->q_header) { in qp_host_map_queues()
751 if (produce_q->q_header != consume_q->q_header) in qp_host_map_queues()
754 if (produce_q->kernel_if->u.h.header_page == NULL || in qp_host_map_queues()
755 *produce_q->kernel_if->u.h.header_page == NULL) in qp_host_map_queues()
758 headers[0] = *produce_q->kernel_if->u.h.header_page; in qp_host_map_queues()
759 headers[1] = *consume_q->kernel_if->u.h.header_page; in qp_host_map_queues()
761 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); in qp_host_map_queues()
762 if (produce_q->q_header != NULL) { in qp_host_map_queues()
763 consume_q->q_header = in qp_host_map_queues()
765 produce_q->q_header + in qp_host_map_queues()
787 if (produce_q->q_header) { in qp_host_unmap_queues()
788 if (produce_q->q_header < consume_q->q_header) in qp_host_unmap_queues()
789 vunmap(produce_q->q_header); in qp_host_unmap_queues()
791 vunmap(consume_q->q_header); in qp_host_unmap_queues()
793 produce_q->q_header = NULL; in qp_host_unmap_queues()
794 consume_q->q_header = NULL; in qp_host_unmap_queues()
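
The mapping trick above is worth spelling out: one vmap() call makes the produce and consume header pages virtually contiguous, so the consume header always sits exactly PAGE_SIZE past the produce header, and teardown only needs to vunmap() the lower of the two addresses. A minimal sketch mirroring the matched lines:

	struct page *headers[2];

	headers[0] = *produce_q->kernel_if->u.h.header_page;
	headers[1] = *consume_q->kernel_if->u.h.header_page;

	produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
	if (produce_q->q_header)
		consume_q->q_header = (struct vmci_queue_header *)
			((u8 *)produce_q->q_header + PAGE_SIZE);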
812 list_for_each_entry(entry, &qp_list->head, list_item) { in qp_list_find()
813 if (vmci_handle_is_equal(entry->handle, handle)) in qp_list_find()
861 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); in qp_notify_peer_local()
900 entry->qp.peer = peer; in qp_guest_endpoint_create()
901 entry->qp.flags = flags; in qp_guest_endpoint_create()
902 entry->qp.produce_size = produce_size; in qp_guest_endpoint_create()
903 entry->qp.consume_size = consume_size; in qp_guest_endpoint_create()
904 entry->qp.ref_count = 0; in qp_guest_endpoint_create()
905 entry->num_ppns = num_ppns; in qp_guest_endpoint_create()
906 entry->produce_q = produce_q; in qp_guest_endpoint_create()
907 entry->consume_q = consume_q; in qp_guest_endpoint_create()
908 INIT_LIST_HEAD(&entry->qp.list_item); in qp_guest_endpoint_create()
911 result = vmci_resource_add(&entry->resource, in qp_guest_endpoint_create()
914 entry->qp.handle = vmci_resource_handle(&entry->resource); in qp_guest_endpoint_create()
916 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { in qp_guest_endpoint_create()
931 qp_free_ppn_set(&entry->ppn_set); in qp_guest_endpoint_destroy()
932 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in qp_guest_endpoint_destroy()
933 qp_free_queue(entry->produce_q, entry->qp.produce_size); in qp_guest_endpoint_destroy()
934 qp_free_queue(entry->consume_q, entry->qp.consume_size); in qp_guest_endpoint_destroy()
936 vmci_resource_remove(&entry->resource); in qp_guest_endpoint_destroy()
943 * supporting a guest device.
952 if (!entry || entry->num_ppns <= 2) in qp_alloc_hypercall()
957 (size_t) entry->num_ppns * ppn_size; in qp_alloc_hypercall()
962 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, in qp_alloc_hypercall()
964 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE; in qp_alloc_hypercall()
965 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE; in qp_alloc_hypercall()
966 alloc_msg->handle = entry->qp.handle; in qp_alloc_hypercall()
967 alloc_msg->peer = entry->qp.peer; in qp_alloc_hypercall()
968 alloc_msg->flags = entry->qp.flags; in qp_alloc_hypercall()
969 alloc_msg->produce_size = entry->qp.produce_size; in qp_alloc_hypercall()
970 alloc_msg->consume_size = entry->qp.consume_size; in qp_alloc_hypercall()
971 alloc_msg->num_ppns = entry->num_ppns; in qp_alloc_hypercall()
974 &entry->ppn_set); in qp_alloc_hypercall()
976 result = vmci_send_datagram(&alloc_msg->hdr); in qp_alloc_hypercall()
985 * supporting a guest device.
1006 list_add(&entry->list_item, &qp_list->head); in qp_list_add_entry()
1016 list_del(&entry->list_item); in qp_list_remove_entry()
1037 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { in qp_detatch_guest_work()
1040 if (entry->qp.ref_count > 1) { in qp_detatch_guest_work()
1053 * We failed to notify a non-local queuepair. in qp_detatch_guest_work()
1072 entry->qp.ref_count--; in qp_detatch_guest_work()
1073 if (entry->qp.ref_count == 0) in qp_detatch_guest_work()
1074 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); in qp_detatch_guest_work()
1078 ref_count = entry->qp.ref_count; in qp_detatch_guest_work()
1090 * pair guest endpoint. Allocates physical pages for the queue
1118 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { in qp_alloc_guest_work()
1120 if (queue_pair_entry->qp.ref_count > 1) { in qp_alloc_guest_work()
1126 if (queue_pair_entry->qp.produce_size != consume_size || in qp_alloc_guest_work()
1127 queue_pair_entry->qp.consume_size != in qp_alloc_guest_work()
1129 queue_pair_entry->qp.flags != in qp_alloc_guest_work()
1145 my_produce_q = queue_pair_entry->consume_q; in qp_alloc_guest_work()
1146 my_consume_q = queue_pair_entry->produce_q; in qp_alloc_guest_work()
1179 &queue_pair_entry->ppn_set); in qp_alloc_guest_work()
1189 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { in qp_alloc_guest_work()
1198 * attach-only flag cannot exist during create. We in qp_alloc_guest_work()
1202 if (queue_pair_entry->qp.handle.context != context_id || in qp_alloc_guest_work()
1203 (queue_pair_entry->qp.peer != VMCI_INVALID_ID && in qp_alloc_guest_work()
1204 queue_pair_entry->qp.peer != context_id)) { in qp_alloc_guest_work()
1209 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { in qp_alloc_guest_work()
1224 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); in qp_alloc_guest_work()
1227 queue_pair_entry->qp.ref_count++; in qp_alloc_guest_work()
1228 *handle = queue_pair_entry->qp.handle; in qp_alloc_guest_work()
1234 * queue pair create. For non-local queue pairs, the in qp_alloc_guest_work()
1237 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && in qp_alloc_guest_work()
1238 queue_pair_entry->qp.ref_count == 1) { in qp_alloc_guest_work()
1239 vmci_q_header_init((*produce_q)->q_header, *handle); in qp_alloc_guest_work()
1240 vmci_q_header_init((*consume_q)->q_header, *handle); in qp_alloc_guest_work()
1268 * If the creator is a guest, it will associate a VMX virtual address range
1327 * The queue pair broker entry stores values from the guest in qp_broker_create()
1328 * point of view, so a creating host side endpoint should swap in qp_broker_create()
1329 * produce and consume values -- unless it is a local queue in qp_broker_create()
1341 entry->qp.handle = handle; in qp_broker_create()
1342 entry->qp.peer = peer; in qp_broker_create()
1343 entry->qp.flags = flags; in qp_broker_create()
1344 entry->qp.produce_size = guest_produce_size; in qp_broker_create()
1345 entry->qp.consume_size = guest_consume_size; in qp_broker_create()
1346 entry->qp.ref_count = 1; in qp_broker_create()
1347 entry->create_id = context_id; in qp_broker_create()
1348 entry->attach_id = VMCI_INVALID_ID; in qp_broker_create()
1349 entry->state = VMCIQPB_NEW; in qp_broker_create()
1350 entry->require_trusted_attach = in qp_broker_create()
1351 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); in qp_broker_create()
1352 entry->created_by_trusted = in qp_broker_create()
1354 entry->vmci_page_files = false; in qp_broker_create()
1355 entry->wakeup_cb = wakeup_cb; in qp_broker_create()
1356 entry->client_data = client_data; in qp_broker_create()
1357 entry->produce_q = qp_host_alloc_queue(guest_produce_size); in qp_broker_create()
1358 if (entry->produce_q == NULL) { in qp_broker_create()
1362 entry->consume_q = qp_host_alloc_queue(guest_consume_size); in qp_broker_create()
1363 if (entry->consume_q == NULL) { in qp_broker_create()
1368 qp_init_queue_mutex(entry->produce_q, entry->consume_q); in qp_broker_create()
1370 INIT_LIST_HEAD(&entry->qp.list_item); in qp_broker_create()
1375 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), in qp_broker_create()
1377 if (entry->local_mem == NULL) { in qp_broker_create()
1381 entry->state = VMCIQPB_CREATED_MEM; in qp_broker_create()
1382 entry->produce_q->q_header = entry->local_mem; in qp_broker_create()
1383 tmp = (u8 *)entry->local_mem + PAGE_SIZE * in qp_broker_create()
1384 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); in qp_broker_create()
1385 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; in qp_broker_create()
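
A worked example of the local_mem layout arithmetic above, assuming PAGE_SIZE == 4096 and produce_size == consume_size == 4096:

	/*
	 * QPE_NUM_PAGES() then covers 2 produce pages (header + data)
	 * plus 2 consume pages. The produce header sits at local_mem,
	 * and the consume header at
	 *
	 *   local_mem + PAGE_SIZE * (DIV_ROUND_UP(4096, 4096) + 1)
	 * = local_mem + 2 * PAGE_SIZE
	 *
	 * i.e. immediately after the produce header page and its one
	 * data page.
	 */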
1389 * need for the kernel side to do that. in qp_broker_create()
1392 entry->produce_q, in qp_broker_create()
1393 entry->consume_q); in qp_broker_create()
1397 entry->state = VMCIQPB_CREATED_MEM; in qp_broker_create()
1401 * side create (in which case we are waiting for the in qp_broker_create()
1402 * guest side to supply the memory) or an old style in qp_broker_create()
1406 entry->state = VMCIQPB_CREATED_NO_MEM; in qp_broker_create()
1409 qp_list_add_entry(&qp_broker_list, &entry->qp); in qp_broker_create()
1414 result = vmci_resource_add(&entry->resource, in qp_broker_create()
1423 entry->qp.handle = vmci_resource_handle(&entry->resource); in qp_broker_create()
1425 vmci_q_header_init(entry->produce_q->q_header, in qp_broker_create()
1426 entry->qp.handle); in qp_broker_create()
1427 vmci_q_header_init(entry->consume_q->q_header, in qp_broker_create()
1428 entry->qp.handle); in qp_broker_create()
1431 vmci_ctx_qp_create(context, entry->qp.handle); in qp_broker_create()
1437 qp_host_free_queue(entry->produce_q, guest_produce_size); in qp_broker_create()
1438 qp_host_free_queue(entry->consume_q, guest_consume_size); in qp_broker_create()
1475 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); in qp_notify_peer()
1494 * If the attacher is a guest, it will associate a VMX virtual address
1505 * well, since the page store information is already set by the guest.
1527 if (entry->state != VMCIQPB_CREATED_NO_MEM && in qp_broker_attach()
1528 entry->state != VMCIQPB_CREATED_MEM) in qp_broker_attach()
1532 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || in qp_broker_attach()
1533 context_id != entry->create_id) { in qp_broker_attach()
1536 } else if (context_id == entry->create_id || in qp_broker_attach()
1537 context_id == entry->attach_id) { in qp_broker_attach()
1542 VMCI_CONTEXT_IS_VM(entry->create_id)) in qp_broker_attach()
1549 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) && in qp_broker_attach()
1550 !entry->created_by_trusted) in qp_broker_attach()
1557 if (entry->require_trusted_attach && in qp_broker_attach()
1565 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) in qp_broker_attach()
1568 if (entry->create_id == VMCI_HOST_CONTEXT_ID) { in qp_broker_attach()
1586 create_context = vmci_ctx_get(entry->create_id); in qp_broker_attach()
1594 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) in qp_broker_attach()
1599 * The queue pair broker entry stores values from the guest in qp_broker_attach()
1600 * point of view, so an attaching guest should match the values in qp_broker_attach()
1604 if (entry->qp.produce_size != produce_size || in qp_broker_attach()
1605 entry->qp.consume_size != consume_size) { in qp_broker_attach()
1608 } else if (entry->qp.produce_size != consume_size || in qp_broker_attach()
1609 entry->qp.consume_size != produce_size) { in qp_broker_attach()
1615 * If a guest attached to a queue pair, it will supply in qp_broker_attach()
1627 if (entry->state != VMCIQPB_CREATED_NO_MEM) in qp_broker_attach()
1632 * Patch up host state to point to guest in qp_broker_attach()
1635 * need for the kernel side to do that. in qp_broker_attach()
1639 entry->produce_q, in qp_broker_attach()
1640 entry->consume_q); in qp_broker_attach()
1644 entry->state = VMCIQPB_ATTACHED_MEM; in qp_broker_attach()
1646 entry->state = VMCIQPB_ATTACHED_NO_MEM; in qp_broker_attach()
1648 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { in qp_broker_attach()
1650 * The host side is attempting to attach to a queue in qp_broker_attach()
1658 /* The host side has successfully attached to a queue pair. */ in qp_broker_attach()
1659 entry->state = VMCIQPB_ATTACHED_MEM; in qp_broker_attach()
1662 if (entry->state == VMCIQPB_ATTACHED_MEM) { in qp_broker_attach()
1664 qp_notify_peer(true, entry->qp.handle, context_id, in qp_broker_attach()
1665 entry->create_id); in qp_broker_attach()
1668 entry->create_id, entry->qp.handle.context, in qp_broker_attach()
1669 entry->qp.handle.resource); in qp_broker_attach()
1672 entry->attach_id = context_id; in qp_broker_attach()
1673 entry->qp.ref_count++; in qp_broker_attach()
1675 entry->wakeup_cb = wakeup_cb; in qp_broker_attach()
1676 entry->client_data = client_data; in qp_broker_attach()
1684 vmci_ctx_qp_create(context, entry->qp.handle); in qp_broker_attach()
1727 * In the initial argument check, we ensure that non-vmkernel hosts in qp_broker_alloc()
1807 *produce_q = entry->consume_q; in qp_alloc_host_work()
1808 *consume_q = entry->produce_q; in qp_alloc_host_work()
1810 *produce_q = entry->produce_q; in qp_alloc_host_work()
1811 *consume_q = entry->consume_q; in qp_alloc_host_work()
1814 *handle = vmci_resource_handle(&entry->resource); in qp_alloc_host_work()
1826 * arguments. The real work is done in the host or guest
1877 * Real work is done in the host or guest specific function.
1896 if (!list_empty(&qp_list->head)) { in qp_list_get_head()
1898 list_first_entry(&qp_list->head, struct qp_entry, in qp_list_get_head()
1977 * We only support guest to host queue pairs, so the VMX must in vmci_qp_broker_set_page_store()
2005 if (entry->create_id != context_id && in vmci_qp_broker_set_page_store()
2006 (entry->create_id != VMCI_HOST_CONTEXT_ID || in vmci_qp_broker_set_page_store()
2007 entry->attach_id != context_id)) { in vmci_qp_broker_set_page_store()
2012 if (entry->state != VMCIQPB_CREATED_NO_MEM && in vmci_qp_broker_set_page_store()
2013 entry->state != VMCIQPB_ATTACHED_NO_MEM) { in vmci_qp_broker_set_page_store()
2019 entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2023 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in vmci_qp_broker_set_page_store()
2025 qp_host_unregister_user_memory(entry->produce_q, in vmci_qp_broker_set_page_store()
2026 entry->consume_q); in vmci_qp_broker_set_page_store()
2030 if (entry->state == VMCIQPB_CREATED_NO_MEM) in vmci_qp_broker_set_page_store()
2031 entry->state = VMCIQPB_CREATED_MEM; in vmci_qp_broker_set_page_store()
2033 entry->state = VMCIQPB_ATTACHED_MEM; in vmci_qp_broker_set_page_store()
2035 entry->vmci_page_files = true; in vmci_qp_broker_set_page_store()
2037 if (entry->state == VMCIQPB_ATTACHED_MEM) { in vmci_qp_broker_set_page_store()
2039 qp_notify_peer(true, handle, context_id, entry->create_id); in vmci_qp_broker_set_page_store()
2042 entry->create_id, entry->qp.handle.context, in vmci_qp_broker_set_page_store()
2043 entry->qp.handle.resource); in vmci_qp_broker_set_page_store()
2055 * entry. Should be used when guest memory becomes available
2056 * again, or the guest detaches.
2060 entry->produce_q->saved_header = NULL; in qp_reset_saved_headers()
2061 entry->consume_q->saved_header = NULL; in qp_reset_saved_headers()
2071 * When a guest endpoint detaches, it will unmap and unregister the guest
2077 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
2112 if (context_id != entry->create_id && context_id != entry->attach_id) { in vmci_qp_broker_detach()
2117 if (context_id == entry->create_id) { in vmci_qp_broker_detach()
2118 peer_id = entry->attach_id; in vmci_qp_broker_detach()
2119 entry->create_id = VMCI_INVALID_ID; in vmci_qp_broker_detach()
2121 peer_id = entry->create_id; in vmci_qp_broker_detach()
2122 entry->attach_id = VMCI_INVALID_ID; in vmci_qp_broker_detach()
2124 entry->qp.ref_count--; in vmci_qp_broker_detach()
2126 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; in vmci_qp_broker_detach()
2139 qp_acquire_queue_mutex(entry->produce_q); in vmci_qp_broker_detach()
2140 headers_mapped = entry->produce_q->q_header || in vmci_qp_broker_detach()
2141 entry->consume_q->q_header; in vmci_qp_broker_detach()
2145 entry->produce_q, in vmci_qp_broker_detach()
2146 entry->consume_q); in vmci_qp_broker_detach()
2152 qp_host_unregister_user_memory(entry->produce_q, in vmci_qp_broker_detach()
2153 entry->consume_q); in vmci_qp_broker_detach()
2160 qp_release_queue_mutex(entry->produce_q); in vmci_qp_broker_detach()
2162 if (!headers_mapped && entry->wakeup_cb) in vmci_qp_broker_detach()
2163 entry->wakeup_cb(entry->client_data); in vmci_qp_broker_detach()
2166 if (entry->wakeup_cb) { in vmci_qp_broker_detach()
2167 entry->wakeup_cb = NULL; in vmci_qp_broker_detach()
2168 entry->client_data = NULL; in vmci_qp_broker_detach()
2172 if (entry->qp.ref_count == 0) { in vmci_qp_broker_detach()
2173 qp_list_remove_entry(&qp_broker_list, &entry->qp); in vmci_qp_broker_detach()
2176 kfree(entry->local_mem); in vmci_qp_broker_detach()
2178 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); in vmci_qp_broker_detach()
2179 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); in vmci_qp_broker_detach()
2180 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); in vmci_qp_broker_detach()
2182 vmci_resource_remove(&entry->resource); in vmci_qp_broker_detach()
2191 entry->state = VMCIQPB_SHUTDOWN_MEM; in vmci_qp_broker_detach()
2193 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; in vmci_qp_broker_detach()
2208 * reference to the queue pair guest memory. This is usually
2209 * called when a guest is unquiesced and the VMX is allowed to
2210 * map guest memory once again.
2241 if (context_id != entry->create_id && context_id != entry->attach_id) { in vmci_qp_broker_map()
2253 page_store.len = QPE_NUM_PAGES(entry->qp); in vmci_qp_broker_map()
2255 qp_acquire_queue_mutex(entry->produce_q); in vmci_qp_broker_map()
2259 entry->produce_q, in vmci_qp_broker_map()
2260 entry->consume_q); in vmci_qp_broker_map()
2261 qp_release_queue_mutex(entry->produce_q); in vmci_qp_broker_map()
2265 entry->state++; in vmci_qp_broker_map()
2267 if (entry->wakeup_cb) in vmci_qp_broker_map()
2268 entry->wakeup_cb(entry->client_data); in vmci_qp_broker_map()
2279 * entry. Should be used when guest memory is unmapped.
2281 * VMCI_SUCCESS on success, appropriate error code if guest memory
2288 if (entry->produce_q->saved_header != NULL && in qp_save_headers()
2289 entry->consume_q->saved_header != NULL) { in qp_save_headers()
2299 if (NULL == entry->produce_q->q_header || in qp_save_headers()
2300 NULL == entry->consume_q->q_header) { in qp_save_headers()
2301 result = qp_host_map_queues(entry->produce_q, entry->consume_q); in qp_save_headers()
2306 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, in qp_save_headers()
2307 sizeof(entry->saved_produce_q)); in qp_save_headers()
2308 entry->produce_q->saved_header = &entry->saved_produce_q; in qp_save_headers()
2309 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, in qp_save_headers()
2310 sizeof(entry->saved_consume_q)); in qp_save_headers()
2311 entry->consume_q->saved_header = &entry->saved_consume_q; in qp_save_headers()
2317 * Removes all references to the guest memory of a given queue pair, and
2319 * called when a VM is being quiesced where access to guest memory should
2351 if (context_id != entry->create_id && context_id != entry->attach_id) { in vmci_qp_broker_unmap()
2358 qp_acquire_queue_mutex(entry->produce_q); in vmci_qp_broker_unmap()
2364 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); in vmci_qp_broker_unmap()
2368 * unmap the guest memory, so we invalidate the previously in vmci_qp_broker_unmap()
2373 qp_host_unregister_user_memory(entry->produce_q, in vmci_qp_broker_unmap()
2374 entry->consume_q); in vmci_qp_broker_unmap()
2379 entry->state--; in vmci_qp_broker_unmap()
2381 qp_release_queue_mutex(entry->produce_q); in vmci_qp_broker_unmap()
2392 * Destroys all guest queue pair endpoints. If active guest queue
2408 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) in vmci_qp_guest_endpoints_exit()
2409 qp_detatch_hypercall(entry->handle); in vmci_qp_guest_endpoints_exit()
2412 entry->ref_count = 0; in vmci_qp_guest_endpoints_exit()
2424 * Note: Non-blocking on the host side is currently only implemented in ESX.
2425 * Since non-blocking isn't yet implemented on the host personality we
2431 qp_acquire_queue_mutex(qpair->produce_q); in qp_lock()
2440 qp_release_queue_mutex(qpair->produce_q); in qp_unlock()
2452 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { in qp_map_queue_headers()
2455 return (produce_q->saved_header && in qp_map_queue_headers()
2456 consume_q->saved_header) ? in qp_map_queue_headers()
2466 * headers of a given queue pair. If the guest memory of the
2476 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); in qp_get_queue_headers()
2478 *produce_q_header = qpair->produce_q->q_header; in qp_get_queue_headers()
2479 *consume_q_header = qpair->consume_q->q_header; in qp_get_queue_headers()
2480 } else if (qpair->produce_q->saved_header && in qp_get_queue_headers()
2481 qpair->consume_q->saved_header) { in qp_get_queue_headers()
2482 *produce_q_header = qpair->produce_q->saved_header; in qp_get_queue_headers()
2483 *consume_q_header = qpair->consume_q->saved_header; in qp_get_queue_headers()
2500 while (qpair->blocked > 0) { in qp_wakeup_cb()
2501 qpair->blocked--; in qp_wakeup_cb()
2502 qpair->generation++; in qp_wakeup_cb()
2503 wake_up(&qpair->event); in qp_wakeup_cb()
2512 * ready for host side access. Returns true when thread is
2519 qpair->blocked++; in qp_wait_for_ready_queue()
2520 generation = qpair->generation; in qp_wait_for_ready_queue()
2522 wait_event(qpair->event, generation != qpair->generation); in qp_wait_for_ready_queue()
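
The blocked/generation pair above is a lost-wakeup guard: a waiter snapshots qpair->generation while still holding the queue pair lock, so a qp_wakeup_cb() that fires between unlock and sleep still changes the generation and the wait_event() condition is already true. A restatement of the matched lines, with the lock calls around them assumed:

	qpair->blocked++;			/* announce a waiter */
	generation = qpair->generation;
	qp_unlock(qpair);
	wait_event(qpair->event, generation != qpair->generation);
	qp_lock(qpair);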
2531 * are enqueued. Assumes the queue->mutex has been acquired. Returns
2555 free_space = vmci_q_header_free_space(produce_q->q_header, in qp_enqueue_locked()
2556 consume_q->q_header, in qp_enqueue_locked()
2565 tail = vmci_q_header_producer_tail(produce_q->q_header); in qp_enqueue_locked()
2571 const size_t tmp = (size_t) (produce_q_size - tail); in qp_enqueue_locked()
2576 written - tmp); in qp_enqueue_locked()
2588 vmci_q_header_add_producer_tail(produce_q->q_header, written, in qp_enqueue_locked()
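
A worked wrap-around example for the split copy above (numbers invented):

	/*
	 * With produce_q_size == 8192 and tail == 6144, enqueueing
	 * written == 4096 bytes does not fit before the end of the
	 * ring, so it splits into
	 *   tmp           = 8192 - 6144 = 2048 bytes at offset 6144,
	 *   written - tmp = 2048 bytes at offset 0,
	 * after which the producer tail advances modulo the queue size
	 * to (6144 + 4096) % 8192 == 2048.
	 */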
2596 * Assumes the queue->mutex has been acquired.
2603 * Side effects:
2622 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, in qp_dequeue_locked()
2623 produce_q->q_header, in qp_dequeue_locked()
2638 head = vmci_q_header_consumer_head(produce_q->q_header); in qp_dequeue_locked()
2644 const size_t tmp = (size_t) (consume_q_size - head); in qp_dequeue_locked()
2649 read - tmp); in qp_dequeue_locked()
2657 vmci_q_header_add_consumer_head(produce_q->q_header, in qp_dequeue_locked()
2664 * vmci_qpair_alloc() - Allocates a queue pair.
2698 * allocated to queuepairs for a guest. However, we try to in vmci_qpair_alloc()
2701 * separately, which means rather than fail, the guest will in vmci_qpair_alloc()
2729 my_qpair->produce_q_size = produce_qsize; in vmci_qpair_alloc()
2730 my_qpair->consume_q_size = consume_qsize; in vmci_qpair_alloc()
2731 my_qpair->peer = peer; in vmci_qpair_alloc()
2732 my_qpair->flags = flags; in vmci_qpair_alloc()
2733 my_qpair->priv_flags = priv_flags; in vmci_qpair_alloc()
2739 my_qpair->guest_endpoint = false; in vmci_qpair_alloc()
2741 my_qpair->blocked = 0; in vmci_qpair_alloc()
2742 my_qpair->generation = 0; in vmci_qpair_alloc()
2743 init_waitqueue_head(&my_qpair->event); in vmci_qpair_alloc()
2748 my_qpair->guest_endpoint = true; in vmci_qpair_alloc()
2752 &my_qpair->produce_q, in vmci_qpair_alloc()
2753 my_qpair->produce_q_size, in vmci_qpair_alloc()
2754 &my_qpair->consume_q, in vmci_qpair_alloc()
2755 my_qpair->consume_q_size, in vmci_qpair_alloc()
2756 my_qpair->peer, in vmci_qpair_alloc()
2757 my_qpair->flags, in vmci_qpair_alloc()
2758 my_qpair->priv_flags, in vmci_qpair_alloc()
2759 my_qpair->guest_endpoint, in vmci_qpair_alloc()
2768 my_qpair->handle = *handle; in vmci_qpair_alloc()
2775 * vmci_qpair_detach() - Detaches the client from a queue pair.
2791 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); in vmci_qpair_detach()
2794 * The guest can fail to detach for a number of reasons, and in vmci_qpair_detach()
2804 old_qpair->handle = VMCI_INVALID_HANDLE; in vmci_qpair_detach()
2805 old_qpair->peer = VMCI_INVALID_ID; in vmci_qpair_detach()
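
For orientation, a hedged usage sketch of the public vmci_qpair API excerpted in this listing (error handling trimmed; VMCI_QPFLAG_LOCAL keeps both endpoints in this kernel, and a peer endpoint, not shown, would read the data back with vmci_qpair_dequeue()):

	struct vmci_qp *qpair;
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	char buf[64] = "hello";

	if (vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
			     VMCI_INVALID_ID, VMCI_QPFLAG_LOCAL,
			     VMCI_NO_PRIVILEGE_FLAGS) == VMCI_SUCCESS) {
		vmci_qpair_enqueue(qpair, buf, sizeof(buf), 0);
		vmci_qpair_detach(&qpair);
	}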
2814 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
2842 ((producer_tail && *producer_tail >= qpair->produce_q_size) || in vmci_qpair_get_produce_indexes()
2843 (consumer_head && *consumer_head >= qpair->produce_q_size))) in vmci_qpair_get_produce_indexes()
2851 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
2879 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || in vmci_qpair_get_consume_indexes()
2880 (producer_head && *producer_head >= qpair->consume_q_size))) in vmci_qpair_get_consume_indexes()
2888 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
2911 qpair->produce_q_size); in vmci_qpair_produce_free_space()
2922 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
2945 qpair->consume_q_size); in vmci_qpair_consume_free_space()
2956 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
2980 qpair->produce_q_size); in vmci_qpair_produce_buf_ready()
2991 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
3015 qpair->consume_q_size); in vmci_qpair_consume_buf_ready()
3026 * vmci_qpair_enqueue() - Throw data on the queue.
3052 result = qp_enqueue_locked(qpair->produce_q, in vmci_qpair_enqueue()
3053 qpair->consume_q, in vmci_qpair_enqueue()
3054 qpair->produce_q_size, in vmci_qpair_enqueue()
3070 * vmci_qpair_dequeue() - Get data from the queue.
3096 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_dequeue()
3097 qpair->consume_q, in vmci_qpair_dequeue()
3098 qpair->consume_q_size, in vmci_qpair_dequeue()
3114 * vmci_qpair_peek() - Peek at the data in the queue.
3141 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_peek()
3142 qpair->consume_q, in vmci_qpair_peek()
3143 qpair->consume_q_size, in vmci_qpair_peek()
3159 * vmci_qpair_enquev() - Throw data on the queue using iov.
3182 result = qp_enqueue_locked(qpair->produce_q, in vmci_qpair_enquev()
3183 qpair->consume_q, in vmci_qpair_enquev()
3184 qpair->produce_q_size, in vmci_qpair_enquev()
3185 &msg->msg_iter); in vmci_qpair_enquev()
3200 * vmci_qpair_dequev() - Get data from the queue using iov.
3223 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_dequev()
3224 qpair->consume_q, in vmci_qpair_dequev()
3225 qpair->consume_q_size, in vmci_qpair_dequev()
3226 &msg->msg_iter, true); in vmci_qpair_dequev()
3241 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
3265 result = qp_dequeue_locked(qpair->produce_q, in vmci_qpair_peekv()
3266 qpair->consume_q, in vmci_qpair_peekv()
3267 qpair->consume_q_size, in vmci_qpair_peekv()
3268 &msg->msg_iter, false); in vmci_qpair_peekv()