Lines Matching refs:kioreq

1146 struct privcmd_kernel_ioreq *kioreq; member
1164 struct privcmd_kernel_ioreq *kioreq = port->kioreq; in ioeventfd_interrupt() local
1165 struct ioreq *ioreq = &kioreq->ioreq[port->vcpu]; in ioeventfd_interrupt()
1183 spin_lock(&kioreq->lock); in ioeventfd_interrupt()
1186 list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) { in ioeventfd_interrupt()
1195 spin_unlock(&kioreq->lock); in ioeventfd_interrupt()
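
The matches at 1164-1195 are from the interrupt handler: the per-port dev_id carries a back-pointer to its parent privcmd_kernel_ioreq (1164), the handler picks the vcpu's slot in the shared ioreq array (1165), and then walks kioreq->ioeventfds under kioreq->lock looking for an entry whose registered address and size match the trapped write. Below is a minimal userspace sketch of that dispatch shape, not the driver code itself: a pthread mutex and a hand-rolled singly linked list stand in for the kernel's spinlock and list_head, and every name in it is illustrative.

#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative analogue of privcmd_kernel_ioeventfd: one notify target. */
struct keventfd {
	uint64_t addr;			/* guest address being watched */
	uint32_t len;			/* access size that must match */
	int fd;				/* would be an eventfd context */
	struct keventfd *next;
};

/* Illustrative analogue of privcmd_kernel_ioreq: lock plus notify targets. */
struct kioreq {
	pthread_mutex_t lock;		/* the driver uses a spinlock */
	struct keventfd *eventfds;
};

/*
 * Dispatch one emulated write the way ioeventfd_interrupt() handles the
 * vcpu's ioreq slot: walk the list under the lock, signal the first match.
 */
static int dispatch_write(struct kioreq *k, uint64_t addr, uint32_t len)
{
	struct keventfd *e;
	int hit = 0;

	pthread_mutex_lock(&k->lock);
	for (e = k->eventfds; e; e = e->next) {
		if (e->addr == addr && e->len == len) {
			printf("signal fd %d for addr 0x%" PRIx64 "\n",
			       e->fd, addr);
			hit = 1;
			break;
		}
	}
	pthread_mutex_unlock(&k->lock);
	return hit;
}

int main(void)
{
	struct keventfd e = { .addr = 0x1000, .len = 4, .fd = 42 };
	struct kioreq k = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.eventfds = &e,
	};

	dispatch_write(&k, 0x1000, 4);	/* matches, "signals" fd 42 */
	dispatch_write(&k, 0x2000, 4);	/* no match, returns 0 */
	return 0;
}

Build with cc -pthread. One detail visible in the listing: 1183 takes the plain spin_lock(), while the ioctl-side matches (1325, 1399, 1438, 1495) use the irqsave variant, since only those paths can be interrupted by this handler.
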
1214 static void ioreq_free(struct privcmd_kernel_ioreq *kioreq) in ioreq_free() argument
1216 struct ioreq_port *ports = kioreq->ports; in ioreq_free()
1221 list_del(&kioreq->list); in ioreq_free()
1223 for (i = kioreq->vcpus - 1; i >= 0; i--) in ioreq_free()
1226 kfree(kioreq); in ioreq_free()
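
ioreq_free() (1214-1226) is the shared teardown path: it drops the privcmd_kernel_ioreq from the global list, unbinds the per-vcpu event-channel handlers in reverse order, and frees the container. A hedged userspace sketch of that shape, with release_port() as an invented stand-in for unbind_from_irqhandler():

#include <stdio.h>
#include <stdlib.h>

/* Illustrative per-vcpu resource; the driver holds an event-channel port. */
struct port {
	int vcpu;
	int bound;
};

struct kioreq {
	int vcpus;
	struct port ports[];	/* flexible array, as in the kernel struct */
};

/* Stand-in for unbind_from_irqhandler(irq_from_evtchn(port), ...). */
static void release_port(struct port *p)
{
	if (p->bound) {
		printf("releasing port for vcpu %d\n", p->vcpu);
		p->bound = 0;
	}
}

/*
 * Mirror of the ioreq_free() shape: walk the ports in reverse order of
 * setup, release each one, then free the whole allocation.
 */
static void kioreq_free(struct kioreq *k)
{
	for (int i = k->vcpus - 1; i >= 0; i--)
		release_port(&k->ports[i]);
	free(k);
}

int main(void)
{
	int vcpus = 3;
	struct kioreq *k = calloc(1, sizeof(*k) + vcpus * sizeof(k->ports[0]));

	if (!k)
		return 1;
	k->vcpus = vcpus;
	for (int i = 0; i < vcpus; i++)
		k->ports[i] = (struct port){ .vcpu = i, .bound = 1 };

	kioreq_free(k);		/* releases vcpu 2, 1, 0, then frees */
	return 0;
}
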
1232 struct privcmd_kernel_ioreq *kioreq; in alloc_ioreq() local
1241 size = struct_size(kioreq, ports, ioeventfd->vcpus); in alloc_ioreq()
1242 kioreq = kzalloc(size, GFP_KERNEL); in alloc_ioreq()
1243 if (!kioreq) in alloc_ioreq()
1246 kioreq->dom = ioeventfd->dom; in alloc_ioreq()
1247 kioreq->vcpus = ioeventfd->vcpus; in alloc_ioreq()
1248 kioreq->uioreq = ioeventfd->ioreq; in alloc_ioreq()
1249 spin_lock_init(&kioreq->lock); in alloc_ioreq()
1250 INIT_LIST_HEAD(&kioreq->ioeventfds); in alloc_ioreq()
1263 kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0])); in alloc_ioreq()
1267 kioreq->vcpus, sizeof(*ports)); in alloc_ioreq()
1273 for (i = 0; i < kioreq->vcpus; i++) { in alloc_ioreq()
1274 kioreq->ports[i].vcpu = i; in alloc_ioreq()
1275 kioreq->ports[i].port = ports[i]; in alloc_ioreq()
1276 kioreq->ports[i].kioreq = kioreq; in alloc_ioreq()
1280 &kioreq->ports[i]); in alloc_ioreq()
1287 list_add_tail(&kioreq->list, &ioreq_list); in alloc_ioreq()
1289 return kioreq; in alloc_ioreq()
1293 unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]); in alloc_ioreq()
1297 kfree(kioreq); in alloc_ioreq()
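
alloc_ioreq() (1232-1297) makes one zeroed allocation sized for the header plus a flexible array of per-vcpu ports, struct_size(kioreq, ports, ioeventfd->vcpus) at 1241, initializes the lock and the ioeventfds list head (1249-1250), resolves the user-supplied ioreq buffer to a kernel pointer (1263), copies the per-vcpu event-channel ports from userspace (1267), binds one handler per vcpu (1273-1280), and only then links the new kioreq onto ioreq_list (1287). The error path at 1293 unbinds just the ports that were already bound. The plain-C sketch below shows the flexible-array allocation plus that partial-failure unwind; bind_port()/release_port() are invented stand-ins for the event-channel calls, and the kernel's struct_size() additionally guards against multiplication overflow.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct port {
	int vcpu;
	int bound;
};

struct kioreq {
	int vcpus;
	struct port ports[];	/* flexible array member, one slot per vcpu */
};

/* Stand-ins for bind_evtchn_to_irqhandler() / unbind_from_irqhandler(). */
static int bind_port(struct port *p)
{
	if (p->vcpu == 2)	/* simulate a failure on the third vcpu */
		return -EINVAL;
	p->bound = 1;
	return 0;
}

static void release_port(struct port *p)
{
	p->bound = 0;
}

/*
 * Rough shape of alloc_ioreq(): one zeroed allocation sized like
 * struct_size(kioreq, ports, vcpus), per-vcpu setup, and an unwind loop
 * that touches only the entries that were actually bound.
 */
static struct kioreq *kioreq_alloc(int vcpus)
{
	struct kioreq *k;
	int i, ret;

	k = calloc(1, sizeof(*k) + (size_t)vcpus * sizeof(k->ports[0]));
	if (!k)
		return NULL;
	k->vcpus = vcpus;

	for (i = 0; i < vcpus; i++) {
		k->ports[i].vcpu = i;
		ret = bind_port(&k->ports[i]);
		if (ret)
			goto error_unbind;
	}
	return k;

error_unbind:
	while (--i >= 0)	/* same shape as the unwind at 1293 */
		release_port(&k->ports[i]);
	free(k);
	return NULL;
}

int main(void)
{
	struct kioreq *k = kioreq_alloc(4);

	printf("allocation %s\n", k ? "succeeded" : "failed and unwound");
	free(k);		/* free(NULL) is a no-op */
	return 0;
}

Note that the listing shows list_add_tail() happening only after every port is bound (1287), so the error path never has to unlink anything from ioreq_list.
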
1304 struct privcmd_kernel_ioreq *kioreq; in get_ioreq() local
1307 list_for_each_entry(kioreq, &ioreq_list, list) { in get_ioreq()
1314 if (kioreq->uioreq != ioeventfd->ioreq) { in get_ioreq()
1316 } else if (kioreq->dom != ioeventfd->dom || in get_ioreq()
1317 kioreq->vcpus != ioeventfd->vcpus) { in get_ioreq()
1319 kioreq->dom, ioeventfd->dom, kioreq->vcpus, in get_ioreq()
1325 spin_lock_irqsave(&kioreq->lock, flags); in get_ioreq()
1326 list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) { in get_ioreq()
1328 spin_unlock_irqrestore(&kioreq->lock, flags); in get_ioreq()
1332 spin_unlock_irqrestore(&kioreq->lock, flags); in get_ioreq()
1334 return kioreq; in get_ioreq()
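
get_ioreq() (1304-1334) is lookup-or-create: it walks the global ioreq_list for an entry registered against the same user ioreq buffer (1314), rejects a match whose dom or vcpus disagree (1316-1319), and, under kioreq->lock, refuses to attach the same eventfd twice (1325-1328); if nothing matches it falls through to alloc_ioreq(). The assign path listed below (1392-1401) then links the new kioeventfd onto kioreq->ioeventfds under the same lock. A hedged sketch of the lookup-or-create shape follows, with illustrative names and the locking omitted (in the driver, both the list walk and the duplicate check run under locks):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative notify target (analogue of privcmd_kernel_ioeventfd). */
struct keventfd {
	int fd;
	struct keventfd *next;
};

/* Illustrative kioreq, keyed on (dom, uioreq, vcpus) like the driver's. */
struct kioreq {
	int dom;
	void *uioreq;
	int vcpus;
	struct keventfd *eventfds;
	struct kioreq *next;
};

static struct kioreq *ioreq_list;	/* global registry, as in the driver */

/*
 * Lookup-or-create: reuse an entry only if every key field agrees and the
 * eventfd is not already attached; a mismatched key or duplicate is refused.
 */
static struct kioreq *get_kioreq(int dom, void *uioreq, int vcpus, int fd)
{
	struct kioreq *k;
	struct keventfd *e;

	for (k = ioreq_list; k; k = k->next) {
		if (k->uioreq != uioreq)
			continue;			/* different buffer */
		if (k->dom != dom || k->vcpus != vcpus)
			return NULL;			/* same buffer, bad key */
		for (e = k->eventfds; e; e = e->next)
			if (e->fd == fd)
				return NULL;		/* duplicate eventfd */
		return k;				/* reuse this one */
	}

	/* Nothing matched: create a fresh entry and register it. */
	k = calloc(1, sizeof(*k));
	if (!k)
		return NULL;
	k->dom = dom;
	k->uioreq = uioreq;
	k->vcpus = vcpus;
	k->next = ioreq_list;
	ioreq_list = k;
	return k;
}

int main(void)
{
	char buf[16];
	struct keventfd e = { .fd = 10 };
	struct kioreq *a = get_kioreq(1, buf, 4, 10);

	if (!a)
		return 1;
	a->eventfds = &e;	/* what the assign step does under the lock */

	printf("reuse: %s\n", get_kioreq(1, buf, 4, 11) == a ? "yes" : "no");
	printf("duplicate rejected: %s\n",
	       get_kioreq(1, buf, 4, 10) ? "no" : "yes");
	free(a);
	return 0;
}
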
1351 struct privcmd_kernel_ioreq *kioreq; in privcmd_ioeventfd_assign() local
1392 kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd); in privcmd_ioeventfd_assign()
1393 if (IS_ERR(kioreq)) { in privcmd_ioeventfd_assign()
1395 ret = PTR_ERR(kioreq); in privcmd_ioeventfd_assign()
1399 spin_lock_irqsave(&kioreq->lock, flags); in privcmd_ioeventfd_assign()
1400 list_add_tail(&kioeventfd->list, &kioreq->ioeventfds); in privcmd_ioeventfd_assign()
1401 spin_unlock_irqrestore(&kioreq->lock, flags); in privcmd_ioeventfd_assign()
1417 struct privcmd_kernel_ioreq *kioreq, *tkioreq; in privcmd_ioeventfd_deassign() local
1427 list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) { in privcmd_ioeventfd_deassign()
1433 if (kioreq->dom != ioeventfd->dom || in privcmd_ioeventfd_deassign()
1434 kioreq->uioreq != ioeventfd->ioreq || in privcmd_ioeventfd_deassign()
1435 kioreq->vcpus != ioeventfd->vcpus) in privcmd_ioeventfd_deassign()
1438 spin_lock_irqsave(&kioreq->lock, flags); in privcmd_ioeventfd_deassign()
1439 list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) { in privcmd_ioeventfd_deassign()
1442 spin_unlock_irqrestore(&kioreq->lock, flags); in privcmd_ioeventfd_deassign()
1444 if (list_empty(&kioreq->ioeventfds)) in privcmd_ioeventfd_deassign()
1445 ioreq_free(kioreq); in privcmd_ioeventfd_deassign()
1449 spin_unlock_irqrestore(&kioreq->lock, flags); in privcmd_ioeventfd_deassign()
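
Deassign (1417-1449) is the mirror image: iterate ioreq_list with the _safe variant, skip entries whose dom, uioreq, or vcpus do not match (1433-1435), unlink the matching eventfd under kioreq->lock, and when the ioeventfds list empties (1444) free the whole kioreq via ioreq_free(). A small sketch of the detach-and-maybe-free step, again with invented userspace names:

#include <stdio.h>
#include <stdlib.h>

struct keventfd {
	int fd;
	struct keventfd *next;
};

struct kioreq {
	int dom;
	struct keventfd *eventfds;
};

/*
 * Detach one eventfd from the kioreq and report whether the container is
 * now empty, so the caller can free it (the driver calls ioreq_free() here).
 */
static int detach_eventfd(struct kioreq *k, int fd)
{
	struct keventfd **pp;

	for (pp = &k->eventfds; *pp; pp = &(*pp)->next) {
		if ((*pp)->fd == fd) {
			struct keventfd *victim = *pp;

			*pp = victim->next;	/* unlink */
			free(victim);
			break;
		}
	}
	return k->eventfds == NULL;		/* last one gone? */
}

int main(void)
{
	struct kioreq *k = calloc(1, sizeof(*k));
	struct keventfd *e = calloc(1, sizeof(*e));

	if (!k || !e)
		return 1;
	e->fd = 10;
	k->eventfds = e;

	if (detach_eventfd(k, 10)) {		/* list now empty */
		printf("last eventfd gone, freeing kioreq\n");
		free(k);
	}
	return 0;
}
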
1488 struct privcmd_kernel_ioreq *kioreq, *tmp; in privcmd_ioeventfd_exit() local
1492 list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) { in privcmd_ioeventfd_exit()
1495 spin_lock_irqsave(&kioreq->lock, flags); in privcmd_ioeventfd_exit()
1496 list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) in privcmd_ioeventfd_exit()
1498 spin_unlock_irqrestore(&kioreq->lock, flags); in privcmd_ioeventfd_exit()
1500 ioreq_free(kioreq); in privcmd_ioeventfd_exit()
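
The exit path (1488-1500) is the same sweep without the key matching: every remaining kioreq on ioreq_list has its attached eventfds released under the lock and is then handed to ioreq_free(), so nothing outlives module teardown. A compact sketch of that unconditional cleanup, with the same illustrative userspace structures as above:

#include <stdio.h>
#include <stdlib.h>

struct keventfd {
	int fd;
	struct keventfd *next;
};

struct kioreq {
	struct keventfd *eventfds;
	struct kioreq *next;
};

/*
 * Unconditional teardown of every kioreq and every attached eventfd,
 * mirroring the shape of privcmd_ioeventfd_exit().
 */
static void exit_cleanup(struct kioreq **list)
{
	struct kioreq *k, *kn;

	for (k = *list; k; k = kn) {
		struct keventfd *e, *en;

		kn = k->next;			/* "safe" iteration */
		for (e = k->eventfds; e; e = en) {
			en = e->next;
			free(e);
		}
		free(k);
	}
	*list = NULL;
}

int main(void)
{
	struct kioreq *list = calloc(1, sizeof(*list));
	struct keventfd *e = calloc(1, sizeof(*e));

	if (!list || !e)
		return 1;
	e->fd = 7;
	list->eventfds = e;

	exit_cleanup(&list);
	printf("all ioreq state released\n");
	return 0;
}
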