Lines Matching +full:num +full:- +full:ids

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Virtio vhost-user driver
7 * This driver allows virtio devices to be used over a vhost-user socket.
14 * <socket> := vhost-user socket path to connect
21 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
30 #include <linux/time-internal.h>
31 #include <linux/virtio-uml.h>
32 #include <shared/as-layout.h>
77 #define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
79 /* Vhost-user protocol */
90 len -= rc; in full_sendmsg_fds()
94 } while (len && (rc >= 0 || rc == -EINTR)); in full_sendmsg_fds()
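
The loop above retries short writes until the whole message is out. Underneath, the message goes over the Unix socket with any file descriptors (memory-region, kick and call fds) attached as SCM_RIGHTS ancillary data. A stand-alone user-space sketch of that send path, using plain sendmsg() instead of the driver's os_* helpers and an arbitrary cap of eight fds:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t sendmsg_fds_sketch(int sock, const void *buf, size_t len,
				  const int *fds, unsigned int num_fds)
{
	char control[CMSG_SPACE(8 * sizeof(int))] = {0};
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	struct cmsghdr *cmsg;

	if (num_fds > 8)
		return -1;

	if (num_fds) {
		msg.msg_control = control;
		msg.msg_controllen = CMSG_SPACE(num_fds * sizeof(int));
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;	/* pass the fds to the peer */
		cmsg->cmsg_len = CMSG_LEN(num_fds * sizeof(int));
		memcpy(CMSG_DATA(cmsg), fds, num_fds * sizeof(int));
	}

	return sendmsg(sock, &msg, 0);
}
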
112 len -= rc; in full_read()
114 } while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN))); in full_read()
119 return -ECONNRESET; in full_read()
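
full_read() keeps reading until the requested length has arrived, retrying on -EINTR (and on -EAGAIN when the caller did not ask for an abortable read), and turns end-of-file into -ECONNRESET so a vanished backend is noticed. A minimal user-space sketch of the same pattern, with plain read()/errno standing in for os_read_file():

#include <errno.h>
#include <unistd.h>

static int full_read_sketch(int fd, char *buf, size_t len, int abortable)
{
	ssize_t rc = 0;

	if (!len)
		return 0;

	do {
		rc = read(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 ||
			 (rc < 0 && (errno == EINTR ||
				     (!abortable && errno == EAGAIN)))));

	if (!len)
		return 0;		/* got everything */
	if (rc == 0)
		return -ECONNRESET;	/* EOF: the peer went away */
	return -errno;			/* hard error from the last read() */
}
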
125 return full_read(fd, msg, sizeof(msg->header), true); in vhost_user_recv_header()
136 * In virtio time-travel mode, we're handling all the vhost-user in vhost_user_recv()
144 * to also handle messages for the simulation time - this function in vhost_user_recv()
154 size = msg->header.size; in vhost_user_recv()
156 return -EPROTO; in vhost_user_recv()
157 return full_read(fd, &msg->payload, size, false); in vhost_user_recv()
163 struct virtio_uml_platform_data *pdata = vu_dev->pdata; in vhost_user_check_reset()
165 if (rc != -ECONNRESET) in vhost_user_check_reset()
168 if (!vu_dev->registered) in vhost_user_check_reset()
171 vu_dev->registered = 0; in vhost_user_check_reset()
173 schedule_work(&pdata->conn_broken_wk); in vhost_user_check_reset()
180 int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg, in vhost_user_recv_resp()
188 if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION)) in vhost_user_recv_resp()
189 return -EPROTO; in vhost_user_recv_resp()
204 return -EPROTO; in vhost_user_recv_u64()
213 int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg, in vhost_user_recv_req()
219 if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) != in vhost_user_recv_req()
221 return -EPROTO; in vhost_user_recv_req()
230 size_t size = sizeof(msg->header) + msg->header.size; in vhost_user_send()
235 msg->header.flags |= VHOST_USER_VERSION; in vhost_user_send()
243 if (!(vu_dev->protocol_features & in vhost_user_send()
248 msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY; in vhost_user_send()
250 spin_lock_irqsave(&vu_dev->sock_lock, flags); in vhost_user_send()
251 rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds); in vhost_user_send()
264 rc = -EIO; in vhost_user_send()
270 spin_unlock_irqrestore(&vu_dev->sock_lock, flags); in vhost_user_send()
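
Both directions of the exchange above use the same framing: a small fixed header followed by a request-specific payload, with header.size counting only the payload. The layout below follows the public vhost-user protocol specification (the driver carries its own equivalent definitions; the names here are illustrative):

#include <stdint.h>

struct vhost_user_header_sketch {
	uint32_t request;	/* VHOST_USER_* request code */
	uint32_t flags;		/* bits 0-1: protocol version (0x1),
				 * bit 2: this message is a reply,
				 * bit 3: a reply is requested (REPLY_ACK) */
	uint32_t size;		/* length of the payload that follows */
} __attribute__((packed));

vhost_user_send() sets the version bits, ORs in the need-reply bit only when the REPLY_ACK protocol feature was negotiated, and writes sizeof(header) + header.size bytes under the socket lock; the response path rejects anything whose flags are not exactly REPLY | VERSION.
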
355 reply.header = msg->header; in vhost_user_reply()
360 rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0); in vhost_user_reply()
389 vu_dev->config_changed_irq = true; in vu_req_read_message()
393 virtio_device_for_each_vq((&vu_dev->vdev), vq) { in vu_req_read_message()
394 if (vq->index == msg.msg.payload.vring_state.index) { in vu_req_read_message()
396 vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index); in vu_req_read_message()
402 /* not supported - VIRTIO_F_ACCESS_PLATFORM */ in vu_req_read_message()
404 /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */ in vu_req_read_message()
410 if (ev && !vu_dev->suspended) in vu_req_read_message()
417 /* mask EAGAIN as we try non-blocking read until socket is empty */ in vu_req_read_message()
418 vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc; in vu_req_read_message()
430 if (vu_dev->recv_rc) { in vu_req_interrupt()
431 vhost_user_check_reset(vu_dev, vu_dev->recv_rc); in vu_req_interrupt()
432 } else if (vu_dev->vq_irq_vq_map) { in vu_req_interrupt()
435 virtio_device_for_each_vq((&vu_dev->vdev), vq) { in vu_req_interrupt()
436 if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index)) in vu_req_interrupt()
439 vu_dev->vq_irq_vq_map = 0; in vu_req_interrupt()
440 } else if (vu_dev->config_changed_irq) { in vu_req_interrupt()
441 virtio_config_changed(&vu_dev->vdev); in vu_req_interrupt()
442 vu_dev->config_changed_irq = false; in vu_req_interrupt()
462 vu_dev->req_fd = req_fds[0]; in vhost_user_init_slave_req()
464 rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ, in vhost_user_init_slave_req()
466 vu_dev->pdev->name, vu_dev, in vhost_user_init_slave_req()
471 vu_dev->irq = rc; in vhost_user_init_slave_req()
481 um_free_irq(vu_dev->irq, vu_dev); in vhost_user_init_slave_req()
496 rc = vhost_user_get_features(vu_dev, &vu_dev->features); in vhost_user_init()
500 if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) { in vhost_user_init()
502 &vu_dev->protocol_features); in vhost_user_init()
505 vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F; in vhost_user_init()
507 vu_dev->protocol_features); in vhost_user_init()
512 if (vu_dev->protocol_features & in vhost_user_init()
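
These fragments show the two-stage handshake in vhost_user_init(): the ordinary virtio feature bits are fetched first, and only if the backend offered VHOST_USER_F_PROTOCOL_FEATURES are the protocol features fetched, masked against the driver's supported set and written back. A compact sketch of that ordering; get_u64()/set_u64() are stand-ins for the real message helpers, and the request codes in the comments come from the vhost-user specification:

#include <stdint.h>

#define VHOST_USER_F_PROTOCOL_FEATURES	30	/* feature bit, per the spec */

/* stubs standing in for vhost_user_get_features() and friends */
static int get_u64(uint32_t request, uint64_t *out)
{
	(void)request;
	*out = 0;	/* a real helper would do the message exchange */
	return 0;
}

static int set_u64(uint32_t request, uint64_t value)
{
	(void)request;
	(void)value;
	return 0;
}

static int negotiate_sketch(uint64_t supported_protocol_f,
			    uint64_t *features, uint64_t *protocol_features)
{
	int rc = get_u64(1 /* GET_FEATURES */, features);

	if (rc)
		return rc;

	if (!(*features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;	/* backend speaks only the base protocol */

	rc = get_u64(15 /* GET_PROTOCOL_FEATURES */, protocol_features);
	if (rc)
		return rc;

	*protocol_features &= supported_protocol_f;
	return set_u64(16 /* SET_PROTOCOL_FEATURES */, *protocol_features);
}
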
527 size_t payload_size = sizeof(msg->payload.config) + cfg_size; in vhost_user_get_config()
528 size_t msg_size = sizeof(msg->header) + payload_size; in vhost_user_get_config()
531 if (!(vu_dev->protocol_features & in vhost_user_get_config()
538 msg->header.request = VHOST_USER_GET_CONFIG; in vhost_user_get_config()
539 msg->header.size = payload_size; in vhost_user_get_config()
540 msg->payload.config.offset = 0; in vhost_user_get_config()
541 msg->payload.config.size = cfg_size; in vhost_user_get_config()
558 if (msg->header.size != payload_size || in vhost_user_get_config()
559 msg->payload.config.size != cfg_size) { in vhost_user_get_config()
560 rc = -EPROTO; in vhost_user_get_config()
563 msg->header.size, payload_size, in vhost_user_get_config()
564 msg->payload.config.size, cfg_size); in vhost_user_get_config()
567 memcpy(buf, msg->payload.config.payload + offset, len); in vhost_user_get_config()
577 size_t payload_size = sizeof(msg->payload.config) + len; in vhost_user_set_config()
578 size_t msg_size = sizeof(msg->header) + payload_size; in vhost_user_set_config()
581 if (!(vu_dev->protocol_features & in vhost_user_set_config()
588 msg->header.request = VHOST_USER_SET_CONFIG; in vhost_user_set_config()
589 msg->header.size = payload_size; in vhost_user_set_config()
590 msg->payload.config.offset = offset; in vhost_user_set_config()
591 msg->payload.config.size = len; in vhost_user_set_config()
592 memcpy(msg->payload.config.payload, buf, len); in vhost_user_set_config()
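
GET_CONFIG and SET_CONFIG wrap the virtio config space in a small descriptor followed by the raw bytes, which is why both helpers above size the message as header + descriptor + cfg_size and why the reply's header.size and config.size are cross-checked before copying. The descriptor, per the vhost-user specification (illustrative names):

#include <stdint.h>

struct vhost_user_config_sketch {
	uint32_t offset;	/* offset into the device config space */
	uint32_t size;		/* number of config bytes that follow */
	uint32_t flags;		/* spec-defined; e.g. marks writes made for migration */
	uint8_t  payload[];	/* the config bytes themselves */
} __attribute__((packed));
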
609 return -EFAULT; in vhost_user_init_mem_region()
611 region_out->guest_addr = addr; in vhost_user_init_mem_region()
612 region_out->user_addr = addr; in vhost_user_init_mem_region()
613 region_out->size = size; in vhost_user_init_mem_region()
614 region_out->mmap_offset = mem_offset; in vhost_user_init_mem_region()
617 rc = phys_mapping(addr + size - 1, &mem_offset); in vhost_user_init_mem_region()
619 addr + size - 1, rc, *fd_out)) in vhost_user_init_mem_region()
620 return -EFAULT; in vhost_user_init_mem_region()
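
vhost_user_init_mem_region() describes one chunk of UML physical memory for the SET_MEM_TABLE request: phys_mapping() supplies the backing fd and offset, and the region is rejected with -EFAULT when the first and last byte do not come from the same mapping. The per-region descriptor sent on the wire is four 64-bit fields, with the backing fd passed separately as SCM_RIGHTS data; layout per the vhost-user specification, names illustrative:

#include <stdint.h>

struct vhost_user_mem_region_sketch {
	uint64_t guest_addr;	/* guest physical address of the region */
	uint64_t size;		/* region length in bytes */
	uint64_t user_addr;	/* the same range in the frontend's address space */
	uint64_t mmap_offset;	/* offset into the fd sent with the message */
};

The fragments set guest_addr and user_addr to the same value, which fits UML, where the kernel's "physical" memory is itself a file-backed mapping inside the UML process.
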
629 .payload.mem_regions.num = 1, in vhost_user_set_mem_table()
631 unsigned long reserved = uml_reserved - uml_physmem; in vhost_user_set_mem_table()
647 * Thus, don't advertise this space to the vhost-user slave. This in vhost_user_set_mem_table()
651 * don't just have the slave read an all-zeroes buffer from the in vhost_user_set_mem_table()
658 * file-backed memory in vhost_user_set_mem_table()
670 rc = vhost_user_init_mem_region(reserved, physmem_size - reserved, in vhost_user_set_mem_table()
677 msg.payload.mem_regions.num++; in vhost_user_set_mem_table()
685 msg.payload.mem_regions.num); in vhost_user_set_mem_table()
689 u32 request, u32 index, u32 num) in vhost_user_set_vring_state() argument
695 .payload.vring_state.num = num, in vhost_user_set_vring_state()
702 u32 index, u32 num) in vhost_user_set_vring_num() argument
705 index, num); in vhost_user_set_vring_num()
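
vhost_user_set_vring_state() is the shared helper behind SET_VRING_NUM (the queue size programmed from vu_setup_vq()) and SET_VRING_ENABLE used further below; those requests carry the same two-word payload. Per the vhost-user specification (illustrative names):

#include <stdint.h>

struct vhost_user_vring_state_sketch {
	uint32_t index;	/* virtqueue index */
	uint32_t num;	/* request-dependent: ring size for SET_VRING_NUM,
			 * 0/1 for SET_VRING_ENABLE */
};
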
742 return -EINVAL; in vhost_user_set_vring_fd()
767 if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES))) in vhost_user_set_vring_enable()
779 struct virtio_uml_vq_info *info = vq->priv; in vu_notify()
783 if (info->suspended) in vu_notify()
788 if (info->kick_fd < 0) { in vu_notify()
791 vu_dev = to_virtio_uml_device(vq->vdev); in vu_notify()
794 vq->index, 0) == 0; in vu_notify()
798 rc = os_write_file(info->kick_fd, &n, sizeof(n)); in vu_notify()
799 } while (rc == -EINTR); in vu_notify()
806 struct virtio_uml_vq_info *info = vq->priv; in vu_interrupt()
812 rc = os_read_file(info->call_fd, &n, sizeof(n)); in vu_interrupt()
815 } while (rc == sizeof(n) || rc == -EINTR); in vu_interrupt()
816 WARN(rc != -EAGAIN, "read returned %d\n", rc); in vu_interrupt()
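
vu_notify() and vu_interrupt() cover the two notification directions. Unless in-band notifications were negotiated (in which case the kick is sent as a protocol message carrying the queue index, as the info->kick_fd < 0 branch shows), the driver kicks the backend by writing an 8-byte counter to the queue's kick fd; completions arrive on the call fd, which the handler drains until -EAGAIN, running vring_interrupt() for each 8-byte event. A user-space sketch of that counter-fd pattern, assuming an eventfd-style fd opened non-blocking on the read side:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static void kick_sketch(int kick_fd)
{
	uint64_t n = 1;
	ssize_t rc;

	do {
		rc = write(kick_fd, &n, sizeof(n));	/* notify the backend */
	} while (rc < 0 && errno == EINTR);
}

static void drain_calls_sketch(int call_fd)
{
	uint64_t n;
	ssize_t rc;

	do {
		rc = read(call_fd, &n, sizeof(n));
		/* the real handler calls vring_interrupt() for each event */
	} while (rc == sizeof(n) || (rc < 0 && errno == EINTR));
}
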
841 return vu_dev->status; in vu_get_status()
848 vu_dev->status = status; in vu_set_status()
855 vu_dev->status = 0; in vu_reset()
860 struct virtio_uml_vq_info *info = vq->priv; in vu_del_vq()
862 if (info->call_fd >= 0) { in vu_del_vq()
865 vu_dev = to_virtio_uml_device(vq->vdev); in vu_del_vq()
867 um_free_irq(vu_dev->irq, vq); in vu_del_vq()
868 os_close_file(info->call_fd); in vu_del_vq()
871 if (info->kick_fd >= 0) in vu_del_vq()
872 os_close_file(info->kick_fd); in vu_del_vq()
885 list_for_each_entry_reverse(vq, &vdev->vqs, list) in vu_del_vqs()
886 WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false)); in vu_del_vqs()
891 list_for_each_entry_safe(vq, n, &vdev->vqs, list) in vu_del_vqs()
898 struct virtio_uml_vq_info *info = vq->priv; in vu_setup_vq_call_fd()
903 if (vu_dev->protocol_features & in vu_setup_vq_call_fd()
905 vu_dev->protocol_features & in vu_setup_vq_call_fd()
907 info->call_fd = -1; in vu_setup_vq_call_fd()
916 info->call_fd = call_fds[0]; in vu_setup_vq_call_fd()
917 rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ, in vu_setup_vq_call_fd()
918 vu_interrupt, IRQF_SHARED, info->name, vq); in vu_setup_vq_call_fd()
922 rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]); in vu_setup_vq_call_fd()
929 um_free_irq(vu_dev->irq, vq); in vu_setup_vq_call_fd()
944 struct platform_device *pdev = vu_dev->pdev; in vu_setup_vq()
947 int num = MAX_SUPPORTED_QUEUE_SIZE; in vu_setup_vq() local
952 rc = -ENOMEM; in vu_setup_vq()
955 snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name, in vu_setup_vq()
956 pdev->id, name); in vu_setup_vq()
958 vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true, in vu_setup_vq()
959 ctx, vu_notify, callback, info->name); in vu_setup_vq()
961 rc = -ENOMEM; in vu_setup_vq()
964 vq->priv = info; in vu_setup_vq()
965 vq->num_max = num; in vu_setup_vq()
966 num = virtqueue_get_vring_size(vq); in vu_setup_vq()
968 if (vu_dev->protocol_features & in vu_setup_vq()
970 info->kick_fd = -1; in vu_setup_vq()
975 info->kick_fd = rc; in vu_setup_vq()
982 rc = vhost_user_set_vring_num(vu_dev, index, num); in vu_setup_vq()
994 (u64) -1); in vu_setup_vq()
1001 if (info->call_fd >= 0) { in vu_setup_vq()
1002 um_free_irq(vu_dev->irq, vq); in vu_setup_vq()
1003 os_close_file(info->call_fd); in vu_setup_vq()
1006 if (info->kick_fd >= 0) in vu_setup_vq()
1007 os_close_file(info->kick_fd); in vu_setup_vq()
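
vu_setup_vq() creates the ring with vring_create_virtqueue(), reads back the actual ring size via virtqueue_get_vring_size(), creates a kick fd unless in-band notifications are in use, and then programs the backend, starting with SET_VRING_NUM as shown. In the full source the ring's descriptor, used and available addresses are sent next, and the trailing (u64) -1 visible above appears to be the unused dirty-log address of that call. The address payload defined by the vhost-user specification (illustrative names):

#include <stdint.h>

struct vhost_user_vring_addr_sketch {
	uint32_t index;			/* virtqueue index */
	uint32_t flags;			/* e.g. whether a dirty log is used */
	uint64_t desc_user_addr;	/* descriptor table */
	uint64_t used_user_addr;	/* used ring */
	uint64_t avail_user_addr;	/* available ring */
	uint64_t log_guest_addr;	/* dirty log base; unused (-1) here */
};
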
1027 return -EINVAL; in vu_find_vqs()
1036 if (!vqi->name) { in vu_find_vqs()
1041 vqs[i] = vu_setup_vq(vdev, queue_idx++, vqi->callback, in vu_find_vqs()
1042 vqi->name, vqi->ctx); in vu_find_vqs()
1049 list_for_each_entry(vq, &vdev->vqs, list) { in vu_find_vqs()
1050 struct virtio_uml_vq_info *info = vq->priv; in vu_find_vqs()
1052 if (info->kick_fd >= 0) { in vu_find_vqs()
1053 rc = vhost_user_set_vring_kick(vu_dev, vq->index, in vu_find_vqs()
1054 info->kick_fd); in vu_find_vqs()
1059 rc = vhost_user_set_vring_enable(vu_dev, vq->index, true); in vu_find_vqs()
1075 return vu_dev->features; in vu_get_features()
1081 u64 supported = vdev->features & VHOST_USER_SUPPORTED_F; in vu_finalize_features()
1084 vu_dev->features = vdev->features | supported; in vu_finalize_features()
1086 return vhost_user_set_features(vu_dev, vu_dev->features); in vu_finalize_features()
1093 return vu_dev->pdev->name; in vu_bus_name()
1118 if (vu_dev->req_fd >= 0) { in virtio_uml_release_dev()
1119 um_free_irq(vu_dev->irq, vu_dev); in virtio_uml_release_dev()
1120 os_close_file(vu_dev->req_fd); in virtio_uml_release_dev()
1123 os_close_file(vu_dev->sock); in virtio_uml_release_dev()
1132 if (WARN_ON(vdev->config != &virtio_uml_config_ops)) in virtio_uml_set_no_vq_suspend()
1135 vu_dev->no_vq_suspend = no_vq_suspend; in virtio_uml_set_no_vq_suspend()
1136 dev_info(&vdev->dev, "%sabled VQ suspend\n", in virtio_uml_set_no_vq_suspend()
1147 vu_dev = platform_get_drvdata(pdata->pdev); in vu_of_conn_broken()
1149 virtio_break_device(&vu_dev->vdev); in vu_of_conn_broken()
1163 struct device_node *np = pdev->dev.of_node; in virtio_uml_create_pdata()
1168 return ERR_PTR(-EINVAL); in virtio_uml_create_pdata()
1170 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); in virtio_uml_create_pdata()
1172 return ERR_PTR(-ENOMEM); in virtio_uml_create_pdata()
1174 INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken); in virtio_uml_create_pdata()
1175 pdata->pdev = pdev; in virtio_uml_create_pdata()
1177 ret = of_property_read_string(np, "socket-path", &pdata->socket_path); in virtio_uml_create_pdata()
1181 ret = of_property_read_u32(np, "virtio-device-id", in virtio_uml_create_pdata()
1182 &pdata->virtio_device_id); in virtio_uml_create_pdata()
1191 struct virtio_uml_platform_data *pdata = pdev->dev.platform_data; in virtio_uml_probe()
1203 return -ENOMEM; in virtio_uml_probe()
1205 vu_dev->pdata = pdata; in virtio_uml_probe()
1206 vu_dev->vdev.dev.parent = &pdev->dev; in virtio_uml_probe()
1207 vu_dev->vdev.dev.release = virtio_uml_release_dev; in virtio_uml_probe()
1208 vu_dev->vdev.config = &virtio_uml_config_ops; in virtio_uml_probe()
1209 vu_dev->vdev.id.device = pdata->virtio_device_id; in virtio_uml_probe()
1210 vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID; in virtio_uml_probe()
1211 vu_dev->pdev = pdev; in virtio_uml_probe()
1212 vu_dev->req_fd = -1; in virtio_uml_probe()
1217 rc = os_connect_socket(pdata->socket_path); in virtio_uml_probe()
1218 } while (rc == -EINTR); in virtio_uml_probe()
1221 vu_dev->sock = rc; in virtio_uml_probe()
1223 spin_lock_init(&vu_dev->sock_lock); in virtio_uml_probe()
1231 device_set_wakeup_capable(&vu_dev->vdev.dev, true); in virtio_uml_probe()
1233 rc = register_virtio_device(&vu_dev->vdev); in virtio_uml_probe()
1235 put_device(&vu_dev->vdev.dev); in virtio_uml_probe()
1236 vu_dev->registered = 1; in virtio_uml_probe()
1240 os_close_file(vu_dev->sock); in virtio_uml_probe()
1250 unregister_virtio_device(&vu_dev->vdev); in virtio_uml_remove()
1260 .init_name = "virtio-uml-cmdline",
1270 struct virtio_uml_platform_data *pdata = pdev->dev.platform_data; in vu_unregister_cmdline_device()
1272 kfree(pdata->socket_path); in vu_unregister_cmdline_device()
1284 vu_dev = platform_get_drvdata(pdata->pdev); in vu_conn_broken()
1286 virtio_break_device(&vu_dev->vdev); in vu_conn_broken()
1288 vu_unregister_cmdline_device(&pdata->pdev->dev, NULL); in vu_conn_broken()
1293 const char *ids = strchr(device, ':'); in vu_cmdline_set() local
1300 if (!ids || ids == device) in vu_cmdline_set()
1301 return -EINVAL; in vu_cmdline_set()
1303 processed = sscanf(ids, ":%u%n:%d%n", in vu_cmdline_set()
1307 if (processed < 1 || ids[consumed]) in vu_cmdline_set()
1308 return -EINVAL; in vu_cmdline_set()
1320 socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL); in vu_cmdline_set()
1322 return -ENOMEM; in vu_cmdline_set()
1327 pr_info("Registering device virtio-uml.%d id=%d at %s\n", in vu_cmdline_set()
1330 pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml", in vu_cmdline_set()
1337 ppdata = pdev->dev.platform_data; in vu_cmdline_set()
1338 ppdata->pdev = pdev; in vu_cmdline_set()
1339 INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken); in vu_cmdline_set()
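
vu_cmdline_set() implements the module parameter documented by the help text further below, virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]: the socket path runs up to the first ':', and the remainder is parsed with the ":%u%n:%d%n" format so the platform-device id stays optional. A stand-alone sketch of just that parsing step; the sample string and main() are made up for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *device = "/tmp/vhost-user-net.sock:1:2";	/* made-up example */
	const char *ids = strchr(device, ':');
	unsigned int virtio_id;
	int pdev_id = -1;	/* -1 here just means "not supplied" */
	int processed, consumed = 0;

	if (!ids || ids == device)
		return 1;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_id, &consumed, &pdev_id, &consumed);
	if (processed < 1 || ids[consumed])
		return 1;

	printf("socket=%.*s virtio_id=%u pdev_id=%d\n",
	       (int)(ids - device), device, virtio_id, pdev_id);
	return 0;
}
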
1351 struct virtio_uml_platform_data *pdata = pdev->dev.platform_data; in vu_cmdline_get_device()
1355 snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n", in vu_cmdline_get_device()
1356 pdata->socket_path, pdata->virtio_device_id, pdev->id); in vu_cmdline_get_device()
1377 " Configure a virtio device over a vhost-user socket.\n"
1405 if (!vu_dev->no_vq_suspend) { in virtio_uml_suspend()
1408 virtio_device_for_each_vq((&vu_dev->vdev), vq) { in virtio_uml_suspend()
1409 struct virtio_uml_vq_info *info = vq->priv; in virtio_uml_suspend()
1411 info->suspended = true; in virtio_uml_suspend()
1412 vhost_user_set_vring_enable(vu_dev, vq->index, false); in virtio_uml_suspend()
1416 if (!device_may_wakeup(&vu_dev->vdev.dev)) { in virtio_uml_suspend()
1417 vu_dev->suspended = true; in virtio_uml_suspend()
1421 return irq_set_irq_wake(vu_dev->irq, 1); in virtio_uml_suspend()
1428 if (!vu_dev->no_vq_suspend) { in virtio_uml_resume()
1431 virtio_device_for_each_vq((&vu_dev->vdev), vq) { in virtio_uml_resume()
1432 struct virtio_uml_vq_info *info = vq->priv; in virtio_uml_resume()
1434 info->suspended = false; in virtio_uml_resume()
1435 vhost_user_set_vring_enable(vu_dev, vq->index, true); in virtio_uml_resume()
1439 vu_dev->suspended = false; in virtio_uml_resume()
1441 if (!device_may_wakeup(&vu_dev->vdev.dev)) in virtio_uml_resume()
1444 return irq_set_irq_wake(vu_dev->irq, 0); in virtio_uml_resume()
1451 .name = "virtio-uml",
1473 MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");