Lines Matching +full:single +full:- +full:channel

1 // SPDX-License-Identifier: GPL-2.0 OR MIT
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
98 * @channel: RPC channel
103 static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol) in vmw_open_channel() argument
112 return -EINVAL; in vmw_open_channel()
114 channel->channel_id = HIGH_WORD(edx); in vmw_open_channel()
115 channel->cookie_high = esi; in vmw_open_channel()
116 channel->cookie_low = edi; in vmw_open_channel()
126 * @channel: RPC channel
130 static int vmw_close_channel(struct rpc_channel *channel) in vmw_close_channel() argument
135 0, channel->channel_id << 16, in vmw_close_channel()
136 channel->cookie_high, in vmw_close_channel()
137 channel->cookie_low, in vmw_close_channel()
141 return -EINVAL; in vmw_close_channel()
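The rpc_channel structure itself is not among the matched lines; judging only from the fields written in vmw_open_channel() above (channel_id from the high word of EDX, plus the two cookies from ESI/EDI), its state can be sketched roughly as below. Field widths are assumptions, not copied from the driver header.

/* Sketch only: inferred from the accesses above, not the real definition. */
struct rpc_channel {
        u16 channel_id;   /* HIGH_WORD(edx) returned by the open hypercall */
        u32 cookie_high;  /* ESI cookie echoed back on every later call */
        u32 cookie_low;   /* EDI cookie echoed back on every later call */
};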
147 * vmw_port_hb_out - Send the message payload either through the
148 * high-bandwidth port if available, or through the backdoor otherwise.
149 * @channel: The rpc channel.
150 * @msg: NULL-terminated message.
151 * @hb: Whether the high-bandwidth port is available.
155 static unsigned long vmw_port_hb_out(struct rpc_channel *channel, in vmw_port_hb_out() argument
166 channel->channel_id << 16, in vmw_port_hb_out()
167 (uintptr_t) msg, channel->cookie_low, in vmw_port_hb_out()
168 channel->cookie_high, in vmw_port_hb_out()
181 msg_len -= bytes; in vmw_port_hb_out()
186 word, channel->channel_id << 16, in vmw_port_hb_out()
187 channel->cookie_high, in vmw_port_hb_out()
188 channel->cookie_low, in vmw_port_hb_out()
196 * vmw_port_hb_in - Receive the message payload either through the
197 * high-bandwidth port if available, or through the backdoor otherwise.
198 * @channel: The rpc channel.
201 * @hb: Whether the high-bandwidth port is available.
205 static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, in vmw_port_hb_in() argument
215 channel->channel_id << 16, in vmw_port_hb_in()
216 channel->cookie_high, in vmw_port_hb_in()
217 (uintptr_t) reply, channel->cookie_low, in vmw_port_hb_in()
231 channel->channel_id << 16, in vmw_port_hb_in()
232 channel->cookie_high, in vmw_port_hb_in()
233 channel->cookie_low, in vmw_port_hb_in()
240 reply_len -= bytes; in vmw_port_hb_in()
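Both vmw_port_hb_out() and vmw_port_hb_in() share the same shape: attempt one high-bandwidth transfer when @hb is set, otherwise move the payload a 32-bit word at a time through the slow backdoor port, as the msg_len/reply_len decrements above show. A minimal sketch of the send-side fallback loop follows; backdoor_send_word() is a hypothetical stand-in for the driver's low-level port macro, not a real function.

/* Illustrative only: backdoor_send_word() is a stand-in, not driver code. */
static unsigned long send_payload_slow(struct rpc_channel *channel,
                                       const char *msg, size_t msg_len)
{
        unsigned long status = 0;

        while (msg_len) {
                unsigned long word = 0;
                const size_t bytes = min_t(size_t, msg_len, sizeof(u32));

                memcpy(&word, msg, bytes);      /* pack up to four payload bytes */
                status = backdoor_send_word(word,
                                            channel->channel_id << 16,
                                            channel->cookie_high,
                                            channel->cookie_low);
                /* the real loop also stops as soon as the host reports failure */
                msg += bytes;
                msg_len -= bytes;
        }
        return status;
}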
251 * @channel: RPC channel
256 static int vmw_send_msg(struct rpc_channel *channel, const char *msg) in vmw_send_msg() argument
266 msg_len, channel->channel_id << 16, in vmw_send_msg()
267 channel->cookie_high, in vmw_send_msg()
268 channel->cookie_low, in vmw_send_msg()
273 return -EINVAL; in vmw_send_msg()
277 ebx = vmw_port_hb_out(channel, msg, in vmw_send_msg()
290 return -EINVAL; in vmw_send_msg()
300 * @channel: channel opened by vmw_open_channel
304 static int vmw_recv_msg(struct rpc_channel *channel, void **msg, in vmw_recv_msg() argument
320 0, channel->channel_id << 16, in vmw_recv_msg()
321 channel->cookie_high, in vmw_recv_msg()
322 channel->cookie_low, in vmw_recv_msg()
327 return -EINVAL; in vmw_recv_msg()
338 return -ENOMEM; in vmw_recv_msg()
343 ebx = vmw_port_hb_in(channel, reply, reply_len, in vmw_recv_msg()
353 return -EINVAL; in vmw_recv_msg()
360 channel->channel_id << 16, in vmw_recv_msg()
361 channel->cookie_high, in vmw_recv_msg()
362 channel->cookie_low, in vmw_recv_msg()
373 return -EINVAL; in vmw_recv_msg()
380 return -EINVAL; in vmw_recv_msg()
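Taken together, vmw_send_msg() and vmw_recv_msg() implement one request/reply exchange over an open channel; the -ENOMEM exit above indicates the reply buffer is allocated inside vmw_recv_msg() and handed to the caller. A hedged sketch of the calling convention, mirroring vmw_host_get_guestinfo() below (the size_t length type is assumed from the call sites, and the caller presumably frees the reply):

/* Calling-pattern sketch; error detail trimmed for brevity. */
static int rpc_round_trip(const char *request, void **reply, size_t *reply_len)
{
        struct rpc_channel channel;
        int ret = -EINVAL;

        if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
                return -EINVAL;

        if (!vmw_send_msg(&channel, request) &&
            !vmw_recv_msg(&channel, reply, reply_len))
                ret = 0;        /* *reply now points at a buffer the caller owns */

        vmw_close_channel(&channel);
        return ret;
}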
394 * a string, and it is up to the caller to post-process.
405 struct rpc_channel channel; in vmw_host_get_guestinfo() local
410 return -ENODEV; in vmw_host_get_guestinfo()
413 return -EINVAL; in vmw_host_get_guestinfo()
415 msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param); in vmw_host_get_guestinfo()
419 return -ENOMEM; in vmw_host_get_guestinfo()
422 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) in vmw_host_get_guestinfo()
425 if (vmw_send_msg(&channel, msg) || in vmw_host_get_guestinfo()
426 vmw_recv_msg(&channel, (void *) &reply, &reply_len)) in vmw_host_get_guestinfo()
429 vmw_close_channel(&channel); in vmw_host_get_guestinfo()
434 reply_len = reply_len > 2 ? reply_len - 2 : 0; in vmw_host_get_guestinfo()
449 vmw_close_channel(&channel); in vmw_host_get_guestinfo()
456 return -EINVAL; in vmw_host_get_guestinfo()
471 struct rpc_channel channel; in vmw_host_printf() local
477 return -ENODEV; in vmw_host_printf()
487 return -ENOMEM; in vmw_host_printf()
494 return -ENOMEM; in vmw_host_printf()
497 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) in vmw_host_printf()
500 if (vmw_send_msg(&channel, msg)) in vmw_host_printf()
503 vmw_close_channel(&channel); in vmw_host_printf()
510 vmw_close_channel(&channel); in vmw_host_printf()
516 return -EINVAL; in vmw_host_printf()
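vmw_host_printf() has two separate -ENOMEM exits, which suggests two allocations: the varargs are first formatted into a string, and that string is then wrapped into the RPC command actually sent over the channel. A sketch of that shape only; the "cmd" prefix and the helper name are placeholders, not taken from the driver.

/* Illustrative helper; "cmd" stands in for the real RPC verb. */
static int format_host_message(char **out, const char *fmt, ...)
{
        va_list ap;
        char *body;

        va_start(ap, fmt);
        body = kvasprintf(GFP_KERNEL, fmt, ap);         /* first allocation */
        va_end(ap);
        if (!body)
                return -ENOMEM;

        *out = kasprintf(GFP_KERNEL, "cmd %s", body);   /* second allocation */
        kfree(body);
        return *out ? 0 : -ENOMEM;
}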
521 * vmw_msg_ioctl: Sends and receives a message to/from the host on behalf of user-space
523 * Sends a message from user-space to the host.
524 * Can also receive a result from the host and return that to user-space.
537 struct rpc_channel channel; in vmw_msg_ioctl() local
544 return -ENOMEM; in vmw_msg_ioctl()
547 length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send), in vmw_msg_ioctl()
552 return -EINVAL; in vmw_msg_ioctl()
556 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) { in vmw_msg_ioctl()
557 DRM_ERROR("Failed to open channel.\n"); in vmw_msg_ioctl()
561 if (vmw_send_msg(&channel, msg)) { in vmw_msg_ioctl()
566 if (!arg->send_only) { in vmw_msg_ioctl()
570 if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) { in vmw_msg_ioctl()
575 if (copy_to_user((void __user *)((unsigned long)arg->receive), in vmw_msg_ioctl()
581 arg->receive_len = (__u32)reply_len; in vmw_msg_ioctl()
586 vmw_close_channel(&channel); in vmw_msg_ioctl()
592 vmw_close_channel(&channel); in vmw_msg_ioctl()
596 return -EINVAL; in vmw_msg_ioctl()
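The field accesses in vmw_msg_ioctl() (arg->send, arg->receive, arg->send_only, arg->receive_len) imply an ioctl argument layout roughly like the sketch below. This is inferred from the call sites only, not copied from the uapi header; the real structure name and exact widths may differ.

/* Inferred layout, for orientation only. */
struct vmw_msg_arg_sketch {
        __u64 send;             /* user pointer to the NUL-terminated request string */
        __u64 receive;          /* user buffer that receives the host's reply */
        __s32 send_only;        /* nonzero: send the message but skip the reply */
        __u32 receive_len;      /* set by the kernel to the reply length copied back */
};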
626 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
637 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
649 /* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
655 * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
658 * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
681 * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
682 * kernel-internal counters. Adds PFN mapping to the hypervisor.
684 * Create a single mksGuestStat instance descriptor and corresponding structures
685 * for all kernel-internal counters. The corresponding PFNs are mapped with the
699 /* Allocate pages for the kernel-internal instance descriptor */ in mksstat_init_kern_id()
703 return -ENOMEM; in mksstat_init_kern_id()
710 /* Set up all kernel-internal counters and corresponding structures */ in mksstat_init_kern_id()
717 BUG_ON(pstrs_acc - pstrs > PAGE_SIZE); in mksstat_init_kern_id()
719 /* Set up the kernel-internal instance descriptor */ in mksstat_init_kern_id()
720 pdesc->reservedMBZ = 0; in mksstat_init_kern_id()
721 pdesc->statStartVA = (uintptr_t)pstat; in mksstat_init_kern_id()
722 pdesc->strsStartVA = (uintptr_t)pstrs; in mksstat_init_kern_id()
723 pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT; in mksstat_init_kern_id()
724 pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT; in mksstat_init_kern_id()
725 pdesc->strsLength = pstrs_acc - pstrs; in mksstat_init_kern_id()
726 snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d", in mksstat_init_kern_id()
727 MKSSTAT_KERNEL_DESCRIPTION, current->pid); in mksstat_init_kern_id()
729 pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat)); in mksstat_init_kern_id()
730 reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1); in mksstat_init_kern_id()
732 pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo)); in mksstat_init_kern_id()
733 reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1); in mksstat_init_kern_id()
735 pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs)); in mksstat_init_kern_id()
736 reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1); in mksstat_init_kern_id()
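reset_ppn_array() is not among the matched lines; from its call sites above (always array + 1, size - 1 after slot 0 has been filled in) and from the INVALID_PPN64 sentinel that vmw_mksstat_cleanup_descriptor() tests further down, it plausibly just marks the unused PPN slots as invalid:

/* Sketch inferred from the call sites; not the driver's actual definition. */
static void reset_ppn_array(PPN64 *arr, size_t num)
{
        size_t i;

        for (i = 0; i < num; ++i)
                arr[i] = INVALID_PPN64;  /* sentinel: "no page mapped here" */
}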
746 * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
749 * Find a slot for a single kernel-internal mksGuestStat instance descriptor.
750 * If none was already present, allocate a new one and set up a kernel-
755 * Return: Non-negative slot on success, negative error code on error.
763 for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) { in vmw_mksstat_get_kern_slot()
764 const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids); in vmw_mksstat_get_kern_slot()
767 if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot])) in vmw_mksstat_get_kern_slot()
771 if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) { in vmw_mksstat_get_kern_slot()
772 const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]); in vmw_mksstat_get_kern_slot()
775 /* Reset top-timer tracking for this slot */ in vmw_mksstat_get_kern_slot()
776 dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT; in vmw_mksstat_get_kern_slot()
778 atomic_set(&dev_priv->mksstat_kern_pids[slot], pid); in vmw_mksstat_get_kern_slot()
782 atomic_set(&dev_priv->mksstat_kern_pids[slot], 0); in vmw_mksstat_get_kern_slot()
787 return -ENOSPC; in vmw_mksstat_get_kern_slot()
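The slot search above is lock-free: 0 marks a free slot, MKSSTAT_PID_RESERVED marks a slot that is transiently being set up or torn down, and any other value is the owning pid. A slot is claimed by a single atomic_cmpxchg() from 0 to the reserved marker, and the real pid is published only after initialization succeeds. A stripped-down sketch of that protocol; do_setup() is a hypothetical stand-in for mksstat_init_kern_id().

/* Generic sketch of the claim protocol; do_setup() is hypothetical. */
static int claim_slot(atomic_t *slot, pid_t pid)
{
        if (atomic_cmpxchg(slot, 0, MKSSTAT_PID_RESERVED) != 0)
                return -EBUSY;          /* owned, or another task is racing */

        if (!do_setup()) {
                atomic_set(slot, pid);  /* publish: slot is now visibly owned */
                return 0;
        }

        atomic_set(slot, 0);            /* setup failed: hand the slot back */
        return -ENOMEM;
}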
793 * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
794 * mksGuestStat instance-descriptor page and unpins all related user pages.
797 * the instance-descriptor page itself.
807 for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i) in vmw_mksstat_cleanup_descriptor()
808 unpin_user_page(pfn_to_page(pdesc->statPPNs[i])); in vmw_mksstat_cleanup_descriptor()
810 for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i) in vmw_mksstat_cleanup_descriptor()
811 unpin_user_page(pfn_to_page(pdesc->infoPPNs[i])); in vmw_mksstat_cleanup_descriptor()
813 for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i) in vmw_mksstat_cleanup_descriptor()
814 unpin_user_page(pfn_to_page(pdesc->strsPPNs[i])); in vmw_mksstat_cleanup_descriptor()
838 /* Discard all userspace-originating instance descriptors and unpin all related pages */ in vmw_mksstat_remove_all()
839 for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) { in vmw_mksstat_remove_all()
840 const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]); in vmw_mksstat_remove_all()
846 const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED); in vmw_mksstat_remove_all()
852 struct page *const page = dev_priv->mksstat_user_pages[i]; in vmw_mksstat_remove_all()
856 dev_priv->mksstat_user_pages[i] = NULL; in vmw_mksstat_remove_all()
857 atomic_set(&dev_priv->mksstat_user_pids[i], 0); in vmw_mksstat_remove_all()
864 ret = -EAGAIN; in vmw_mksstat_remove_all()
868 /* Discard all kernel-internal instance descriptors and free all related pages */ in vmw_mksstat_remove_all()
869 for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) { in vmw_mksstat_remove_all()
870 const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]); in vmw_mksstat_remove_all()
876 const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED); in vmw_mksstat_remove_all()
882 struct page *const page = dev_priv->mksstat_kern_pages[i]; in vmw_mksstat_remove_all()
886 dev_priv->mksstat_kern_pages[i] = NULL; in vmw_mksstat_remove_all()
887 atomic_set(&dev_priv->mksstat_kern_pids[i], 0); in vmw_mksstat_remove_all()
894 ret = -EAGAIN; in vmw_mksstat_remove_all()
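Teardown uses the same sentinel in reverse: an occupied slot is flipped from its pid to MKSSTAT_PID_RESERVED before its pages are released, and if the slot is already reserved by a concurrent add/remove the loop records -EAGAIN instead of blocking. Per slot, the handling is roughly as follows; release_slot_pages() is a hypothetical stand-in for the unpin/free calls above.

/* Per-slot teardown sketch; release_slot_pages() is hypothetical. */
static int drain_slot(atomic_t *slot)
{
        const pid_t pid = (pid_t)atomic_read(slot);

        if (!pid)
                return 0;               /* empty slot, nothing to do */
        if (pid == MKSSTAT_PID_RESERVED)
                return -EAGAIN;         /* another task is mid-setup/teardown */
        if (atomic_cmpxchg(slot, pid, MKSSTAT_PID_RESERVED) != pid)
                return -EAGAIN;         /* lost the race; caller may retry */

        release_slot_pages();           /* unpin user pages / free kernel pages */
        atomic_set(slot, 0);            /* slot is free again */
        return 0;
}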
922 * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
925 * Create a hypervisor PFN mapping, containing a single mksGuestStat instance
942 const size_t num_pages_stat = PFN_UP(arg->stat_len); in vmw_mksstat_add_ioctl()
943 const size_t num_pages_info = PFN_UP(arg->info_len); in vmw_mksstat_add_ioctl()
944 const size_t num_pages_strs = PFN_UP(arg->strs_len); in vmw_mksstat_add_ioctl()
955 int ret_err = -ENOMEM; in vmw_mksstat_add_ioctl()
957 arg->id = -1; in vmw_mksstat_add_ioctl()
959 if (!arg->stat || !arg->info || !arg->strs) in vmw_mksstat_add_ioctl()
960 return -EINVAL; in vmw_mksstat_add_ioctl()
962 if (!arg->stat_len || !arg->info_len || !arg->strs_len) in vmw_mksstat_add_ioctl()
963 return -EINVAL; in vmw_mksstat_add_ioctl()
965 if (!arg->description) in vmw_mksstat_add_ioctl()
966 return -EINVAL; in vmw_mksstat_add_ioctl()
968 if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) || in vmw_mksstat_add_ioctl()
969 num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) || in vmw_mksstat_add_ioctl()
970 num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs)) in vmw_mksstat_add_ioctl()
971 return -EINVAL; in vmw_mksstat_add_ioctl()
974 for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot) in vmw_mksstat_add_ioctl()
975 if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED)) in vmw_mksstat_add_ioctl()
978 if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids)) in vmw_mksstat_add_ioctl()
979 return -ENOSPC; in vmw_mksstat_add_ioctl()
981 BUG_ON(dev_priv->mksstat_user_pages[slot]); in vmw_mksstat_add_ioctl()
983 /* Allocate statically-sized temp arrays for pages -- too big to keep in frame */ in vmw_mksstat_add_ioctl()
985 ARRAY_SIZE(pdesc->statPPNs) + in vmw_mksstat_add_ioctl()
986 ARRAY_SIZE(pdesc->infoPPNs) + in vmw_mksstat_add_ioctl()
987 ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL); in vmw_mksstat_add_ioctl()
992 pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs); in vmw_mksstat_add_ioctl()
993 pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs); in vmw_mksstat_add_ioctl()
1004 pdesc->reservedMBZ = 0; in vmw_mksstat_add_ioctl()
1005 pdesc->statStartVA = arg->stat; in vmw_mksstat_add_ioctl()
1006 pdesc->strsStartVA = arg->strs; in vmw_mksstat_add_ioctl()
1007 pdesc->statLength = arg->stat_len; in vmw_mksstat_add_ioctl()
1008 pdesc->infoLength = arg->info_len; in vmw_mksstat_add_ioctl()
1009 pdesc->strsLength = arg->strs_len; in vmw_mksstat_add_ioctl()
1010 desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description), in vmw_mksstat_add_ioctl()
1011 ARRAY_SIZE(pdesc->description) - 1); in vmw_mksstat_add_ioctl()
1014 ret_err = -EFAULT; in vmw_mksstat_add_ioctl()
1018 reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs)); in vmw_mksstat_add_ioctl()
1019 reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs)); in vmw_mksstat_add_ioctl()
1020 reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs)); in vmw_mksstat_add_ioctl()
1023 nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat); in vmw_mksstat_add_ioctl()
1028 pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]); in vmw_mksstat_add_ioctl()
1030 nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info); in vmw_mksstat_add_ioctl()
1035 pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]); in vmw_mksstat_add_ioctl()
1037 nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs); in vmw_mksstat_add_ioctl()
1042 pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]); in vmw_mksstat_add_ioctl()
1049 dev_priv->mksstat_user_pages[slot] = page; in vmw_mksstat_add_ioctl()
1050 atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current)); in vmw_mksstat_add_ioctl()
1052 arg->id = slot; in vmw_mksstat_add_ioctl()
1054 …DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdes… in vmw_mksstat_add_ioctl()
1072 atomic_set(&dev_priv->mksstat_user_pids[slot], 0); in vmw_mksstat_add_ioctl()
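Each of the three pinning blocks above follows the same recipe: pin the user range long-term, require exactly the expected page count, and record every page's PFN in the descriptor. One subtlety worth spelling out is the unwind rule for pin_user_pages_fast(): a short positive return still leaves that many pages pinned and they must be released, while a negative return pins nothing. A condensed, hypothetical helper showing that rule:

/* Illustrative helper, not a driver function: pin one range and record PFNs. */
static int pin_range_ppns(unsigned long uaddr, size_t num_pages,
                          struct page **pages, PPN64 *ppns)
{
        const int pinned = pin_user_pages_fast(uaddr, num_pages,
                                               FOLL_LONGTERM, pages);
        size_t i;

        if (pinned < 0)
                return pinned;                  /* nothing was pinned */
        if ((size_t)pinned != num_pages) {
                unpin_user_pages(pages, pinned);        /* release the partial pin */
                return -EFAULT;
        }

        for (i = 0; i < num_pages; ++i)
                ppns[i] = page_to_pfn(pages[i]);
        return 0;
}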
1081 * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
1084 * Discard a hypervisor PFN mapping, containing a single mksGuestStat instance
1101 const size_t slot = arg->id; in vmw_mksstat_remove_ioctl()
1104 if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids)) in vmw_mksstat_remove_ioctl()
1105 return -EINVAL; in vmw_mksstat_remove_ioctl()
1107 DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot); in vmw_mksstat_remove_ioctl()
1110 pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED); in vmw_mksstat_remove_ioctl()
1116 struct page *const page = dev_priv->mksstat_user_pages[slot]; in vmw_mksstat_remove_ioctl()
1120 dev_priv->mksstat_user_pages[slot] = NULL; in vmw_mksstat_remove_ioctl()
1121 atomic_set(&dev_priv->mksstat_user_pids[slot], 0); in vmw_mksstat_remove_ioctl()
1129 return -EAGAIN; in vmw_mksstat_remove_ioctl()
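Note the ownership model tying the two ioctls together: vmw_mksstat_add_ioctl() tags the slot with task_pgrp_vnr(current), and the remove path only proceeds when the caller's process group matches, again parking the slot in MKSSTAT_PID_RESERVED for the duration of the cleanup. The test distilled into a small sketch (the helper name is illustrative, not driver code):

/* Illustrative predicate; mirrors the cmpxchg in the remove path above. */
static bool try_own_for_removal(atomic_t *slot_pid)
{
        const pid_t pgid = task_pgrp_vnr(current);

        return atomic_cmpxchg(slot_pid, pgid, MKSSTAT_PID_RESERVED) == pgid;
}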