
1 // SPDX-License-Identifier: GPL-2.0-only
7 * must dynamically map irqs<->event channels. The event channels
15 * 1. Inter-domain notifications. This includes all the virtual
16 * device events, since they're driven by front-ends in another domain
18 * 2. VIRQs, typically used for timers. These are per-cpu events.
20 * 4. PIRQs - Hardware interrupts.
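
A minimal sketch of how a driver consumes these bindings, using the bind_virq_to_irqhandler() helper this file exports for case 2; my_timer_handler and bind_timer_virq are hypothetical names and the handler body is a placeholder:

#include <linux/interrupt.h>
#include <xen/events.h>

static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
	/* Runs as an ordinary Linux interrupt handler once bound. */
	return IRQ_HANDLED;
}

static int bind_timer_virq(unsigned int cpu)
{
	/* Map the per-cpu VIRQ_TIMER on @cpu to a dynamically allocated irq. */
	int irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_timer_handler,
					  IRQF_PERCPU, "my-timer", NULL);

	return irq < 0 ? irq : 0;
}
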
59 #include <xen/xen-ops.h>
87 * type - enum xen_irq_type
88 * event channel - irq->event channel mapping
89 * cpu - cpu this event channel is bound to
90 * index - type-specific information:
91 * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
93 * VIRQ - virq number
94 * IPI - IPI vector
95 * EVTCHN -
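
A condensed sketch of the bookkeeping those comments describe; the name irq_info_sketch and the exact layout are illustrative only (the real structure carries further list/RCU/accounting members):

struct irq_info_sketch {
	enum xen_irq_type type;		/* IRQT_VIRQ, IRQT_IPI, ... */
	evtchn_port_t evtchn;		/* irq->event channel mapping */
	unsigned short cpu;		/* cpu this event channel is bound to */
	union {				/* type-specific information */
		unsigned short virq;	/* VIRQ: virq number */
		unsigned int ipi;	/* IPI: IPI vector */
		struct {
			unsigned short pirq;	/* vector or physical IRQ */
			unsigned short gsi;
			unsigned char flags;	/* e.g. PIRQ_NEEDS_EOI */
		} pirq;
	} u;
};
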
113 unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
146 * This lock protects updates to the following mapping and reference-count
155 * IRQ-desc lock
157 * irq_info->lock
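
Illustration of that ordering, assuming the mapping/reference-count lock from the first comment is a mutex named irq_mapping_update_lock (not among the matched lines): outer locks are taken first, the per-channel irq_info->lock last, and never the reverse.

static void locked_update_sketch(struct irq_info *info)
{
	unsigned long flags;

	mutex_lock(&irq_mapping_update_lock);		/* outermost */
	raw_spin_lock_irqsave(&info->lock, flags);	/* innermost */
	/* ... update the evtchn <-> irq mapping here ... */
	raw_spin_unlock_irqrestore(&info->lock, flags);
	mutex_unlock(&irq_mapping_update_lock);
}
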
162 /* IRQ <-> VIRQ mapping. */
163 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
165 /* IRQ <-> IPI mapping */
166 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
167 /* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
168 static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
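
These tables are consulted per cpu; a lookup sketch with a hypothetical helper name, where -1 means "not bound":

static int virq_irq_on_cpu(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];	/* -1 if unbound */
}
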
184 #define VALID_EVTCHN(chn) ((chn) != 0)
201 WRITE_ONCE(evtchn_row[col], -1); in clear_evtchn_to_irq_row()
222 return -EINVAL; in set_evtchn_to_irq()
228 /* Unallocated irq entries return -1 anyway */ in set_evtchn_to_irq()
229 if (irq == -1) in set_evtchn_to_irq()
234 return -ENOMEM; in set_evtchn_to_irq()
283 if (!info->is_accounted) in channels_on_cpu_dec()
286 info->is_accounted = 0; in channels_on_cpu_dec()
288 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids)) in channels_on_cpu_dec()
291 WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0)); in channels_on_cpu_dec()
296 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids)) in channels_on_cpu_inc()
299 if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1, in channels_on_cpu_inc()
303 info->is_accounted = 1; in channels_on_cpu_inc()
317 unsigned int irq = info->irq; in delayed_free_irq()
335 BUG_ON(info->type != IRQT_UNBOUND && info->type != type); in xen_irq_info_common_setup()
337 info->type = type; in xen_irq_info_common_setup()
338 info->evtchn = evtchn; in xen_irq_info_common_setup()
339 info->cpu = cpu; in xen_irq_info_common_setup()
340 info->mask_reason = EVT_MASK_REASON_EXPLICIT; in xen_irq_info_common_setup()
341 raw_spin_lock_init(&info->lock); in xen_irq_info_common_setup()
343 ret = set_evtchn_to_irq(evtchn, info->irq); in xen_irq_info_common_setup()
347 irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN); in xen_irq_info_common_setup()
359 info->u.interdomain = dev; in xen_irq_info_evtchn_setup()
361 atomic_inc(&dev->event_channels); in xen_irq_info_evtchn_setup()
369 info->u.ipi = ipi; in xen_irq_info_ipi_setup()
371 per_cpu(ipi_to_irq, cpu)[ipi] = info->irq; in xen_irq_info_ipi_setup()
380 info->u.virq = virq; in xen_irq_info_virq_setup()
382 per_cpu(virq_to_irq, cpu)[virq] = info->irq; in xen_irq_info_virq_setup()
391 info->u.pirq.pirq = pirq; in xen_irq_info_pirq_setup()
392 info->u.pirq.gsi = gsi; in xen_irq_info_pirq_setup()
393 info->u.pirq.domid = domid; in xen_irq_info_pirq_setup()
394 info->u.pirq.flags = flags; in xen_irq_info_pirq_setup()
401 set_evtchn_to_irq(info->evtchn, -1); in xen_irq_info_cleanup()
402 xen_evtchn_port_remove(info->evtchn, info->cpu); in xen_irq_info_cleanup()
403 info->evtchn = 0; in xen_irq_info_cleanup()
419 return info->evtchn; in evtchn_from_irq()
426 return info ? info->irq : -1; in irq_from_evtchn()
443 BUG_ON(info->type != IRQT_IPI); in ipi_from_irq()
445 return info->u.ipi; in ipi_from_irq()
451 BUG_ON(info->type != IRQT_VIRQ); in virq_from_irq()
453 return info->u.virq; in virq_from_irq()
459 BUG_ON(info->type != IRQT_PIRQ); in pirq_from_irq()
461 return info->u.pirq.pirq; in pirq_from_irq()
468 return info ? info->cpu : 0; in cpu_from_evtchn()
475 raw_spin_lock_irqsave(&info->lock, flags); in do_mask()
477 if (!info->mask_reason) in do_mask()
478 mask_evtchn(info->evtchn); in do_mask()
480 info->mask_reason |= reason; in do_mask()
482 raw_spin_unlock_irqrestore(&info->lock, flags); in do_mask()
489 raw_spin_lock_irqsave(&info->lock, flags); in do_unmask()
491 info->mask_reason &= ~reason; in do_unmask()
493 if (!info->mask_reason) in do_unmask()
494 unmask_evtchn(info->evtchn); in do_unmask()
496 raw_spin_unlock_irqrestore(&info->lock, flags); in do_unmask()
508 BUG_ON(info->type != IRQT_PIRQ); in pirq_needs_eoi_flag()
510 return info->u.pirq.flags & PIRQ_NEEDS_EOI; in pirq_needs_eoi_flag()
517 struct irq_data *data = irq_get_irq_data(info->irq); in bind_evtchn_to_cpu()
523 xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu); in bind_evtchn_to_cpu()
526 info->cpu = cpu; in bind_evtchn_to_cpu()
531 * notify_remote_via_irq - send event to remote end of event channel via irq
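
A hypothetical producer-side caller (send_request and struct my_ring are illustrative): publish work on a shared ring, then kick the remote end of the channel.

#include <xen/events.h>

struct my_ring;				/* illustrative shared-ring type */

static void send_request(struct my_ring *ring, int irq)
{
	/* ... place the request and advance the producer index ... */
	wmb();				/* make the request visible first */
	notify_remote_via_irq(irq);	/* raise the bound event channel */
}
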
557 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); in lateeoi_list_del()
560 spin_lock_irqsave(&eoi->eoi_list_lock, flags); in lateeoi_list_del()
561 list_del_init(&info->eoi_list); in lateeoi_list_del()
562 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); in lateeoi_list_del()
567 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); in lateeoi_list_add()
573 if (now < info->eoi_time) in lateeoi_list_add()
574 delay = info->eoi_time - now; in lateeoi_list_add()
578 spin_lock_irqsave(&eoi->eoi_list_lock, flags); in lateeoi_list_add()
580 elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, in lateeoi_list_add()
582 if (!elem || info->eoi_time < elem->eoi_time) { in lateeoi_list_add()
583 list_add(&info->eoi_list, &eoi->eoi_list); in lateeoi_list_add()
584 mod_delayed_work_on(info->eoi_cpu, system_wq, in lateeoi_list_add()
585 &eoi->delayed, delay); in lateeoi_list_add()
587 list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) { in lateeoi_list_add()
588 if (elem->eoi_time <= info->eoi_time) in lateeoi_list_add()
591 list_add(&info->eoi_list, &elem->eoi_list); in lateeoi_list_add()
594 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); in lateeoi_list_add()
603 evtchn = info->evtchn; in xen_irq_lateeoi_locked()
604 if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list)) in xen_irq_lateeoi_locked()
608 struct xenbus_device *dev = info->u.interdomain; in xen_irq_lateeoi_locked()
611 if (dev && dev->spurious_threshold) in xen_irq_lateeoi_locked()
612 threshold = dev->spurious_threshold; in xen_irq_lateeoi_locked()
614 if ((1 << info->spurious_cnt) < (HZ << 2)) { in xen_irq_lateeoi_locked()
615 if (info->spurious_cnt != 0xFF) in xen_irq_lateeoi_locked()
616 info->spurious_cnt++; in xen_irq_lateeoi_locked()
618 if (info->spurious_cnt > threshold) { in xen_irq_lateeoi_locked()
619 delay = 1 << (info->spurious_cnt - 1 - threshold); in xen_irq_lateeoi_locked()
622 if (!info->eoi_time) in xen_irq_lateeoi_locked()
623 info->eoi_cpu = smp_processor_id(); in xen_irq_lateeoi_locked()
624 info->eoi_time = get_jiffies_64() + delay; in xen_irq_lateeoi_locked()
626 atomic_add(delay, &dev->jiffies_eoi_delayed); in xen_irq_lateeoi_locked()
629 atomic_inc(&dev->spurious_events); in xen_irq_lateeoi_locked()
631 info->spurious_cnt = 0; in xen_irq_lateeoi_locked()
634 cpu = info->eoi_cpu; in xen_irq_lateeoi_locked()
635 if (info->eoi_time && in xen_irq_lateeoi_locked()
636 (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) { in xen_irq_lateeoi_locked()
641 info->eoi_time = 0; in xen_irq_lateeoi_locked()
644 smp_store_release(&info->is_active, 0); in xen_irq_lateeoi_locked()
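
Worked numbers for the spurious-event back-off above, assuming the default threshold of 1 (no dev->spurious_threshold override) and HZ = 250:

	spurious_cnt = 1 -> not above threshold, no delay yet
	spurious_cnt = 2 -> delay = 1 << (2 - 1 - 1) = 1 jiffy
	spurious_cnt = 3 -> delay = 1 << 1 = 2 jiffies
	...
	the counter stops rising once (1 << cnt) >= (HZ << 2) = 1000, i.e. at
	spurious_cnt = 10, capping the delay at 1 << 8 = 256 jiffies (~1 s).
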
660 spin_lock_irqsave(&eoi->eoi_list_lock, flags); in xen_irq_lateeoi_worker()
662 info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, in xen_irq_lateeoi_worker()
668 if (now < info->eoi_time) { in xen_irq_lateeoi_worker()
669 mod_delayed_work_on(info->eoi_cpu, system_wq, in xen_irq_lateeoi_worker()
670 &eoi->delayed, in xen_irq_lateeoi_worker()
671 info->eoi_time - now); in xen_irq_lateeoi_worker()
675 list_del_init(&info->eoi_list); in xen_irq_lateeoi_worker()
677 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); in xen_irq_lateeoi_worker()
679 info->eoi_time = 0; in xen_irq_lateeoi_worker()
684 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); in xen_irq_lateeoi_worker()
693 INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker); in xen_cpu_init_eoi()
694 spin_lock_init(&eoi->eoi_list_lock); in xen_cpu_init_eoi()
695 INIT_LIST_HEAD(&eoi->eoi_list); in xen_cpu_init_eoi()
719 info->irq = irq; in xen_irq_init()
720 info->type = IRQT_UNBOUND; in xen_irq_init()
721 info->refcnt = -1; in xen_irq_init()
722 INIT_RCU_WORK(&info->rwork, delayed_free_irq); in xen_irq_init()
731 INIT_LIST_HEAD(&info->eoi_list); in xen_irq_init()
732 list_add_tail(&info->list, &xen_irq_list_head); in xen_irq_init()
740 int irq = irq_alloc_desc_from(0, -1); in xen_allocate_irq_dynamic()
770 irq = irq_alloc_desc_at(gsi, -1); in xen_allocate_irq_gsi()
784 if (!list_empty(&info->eoi_list)) in xen_free_irq()
787 list_del(&info->list); in xen_free_irq()
789 WARN_ON(info->refcnt > 0); in xen_free_irq()
791 queue_rcu_work(system_wq, &info->rwork); in xen_free_irq()
797 smp_store_release(&info->is_active, 0); in event_handler_exit()
798 clear_evtchn(info->evtchn); in event_handler_exit()
809 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI; in pirq_query_unmask()
811 info->u.pirq.flags |= PIRQ_NEEDS_EOI; in pirq_query_unmask()
819 if (!VALID_EVTCHN(info->evtchn)) in do_eoi_pirq()
832 struct irq_info *info = info_for_irq(data->irq); in eoi_pirq()
839 if (VALID_EVTCHN(info->evtchn)) in do_disable_dynirq()
845 struct irq_info *info = info_for_irq(data->irq); in disable_dynirq()
853 struct irq_info *info = info_for_irq(data->irq); in mask_ack_pirq()
864 evtchn_port_t evtchn = info->evtchn; in __startup_pirq()
872 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? in __startup_pirq()
876 pr_warn("Failed to obtain physical IRQ %d\n", info->irq); in __startup_pirq()
883 rc = set_evtchn_to_irq(evtchn, info->irq); in __startup_pirq()
887 info->evtchn = evtchn; in __startup_pirq()
902 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", info->irq, in __startup_pirq()
910 struct irq_info *info = info_for_irq(data->irq); in startup_pirq()
917 struct irq_info *info = info_for_irq(data->irq); in shutdown_pirq()
918 evtchn_port_t evtchn = info->evtchn; in shutdown_pirq()
920 BUG_ON(info->type != IRQT_PIRQ); in shutdown_pirq()
945 if (info->type != IRQT_PIRQ) in xen_irq_from_gsi()
948 if (info->u.pirq.gsi == gsi) in xen_irq_from_gsi()
949 return info->irq; in xen_irq_from_gsi()
952 return -1; in xen_irq_from_gsi()
966 if (info->refcnt > 0) { in __unbind_from_irq()
967 info->refcnt--; in __unbind_from_irq()
968 if (info->refcnt != 0) in __unbind_from_irq()
972 evtchn = info->evtchn; in __unbind_from_irq()
975 unsigned int cpu = info->cpu; in __unbind_from_irq()
978 if (!info->is_static) in __unbind_from_irq()
981 switch (info->type) { in __unbind_from_irq()
983 per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1; in __unbind_from_irq()
986 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1; in __unbind_from_irq()
990 dev = info->u.interdomain; in __unbind_from_irq()
992 atomic_dec(&dev->event_channels); in __unbind_from_irq()
1027 if (ret != -1) { in xen_bind_pirq_gsi_to_irq()
1037 irq_op.irq = info->irq; in xen_bind_pirq_gsi_to_irq()
1040 /* Only the privileged domain can do this. For non-priv, the pcifront in xen_bind_pirq_gsi_to_irq()
1046 ret = -ENOSPC; in xen_bind_pirq_gsi_to_irq()
1053 __unbind_from_irq(info, info->irq); in xen_bind_pirq_gsi_to_irq()
1074 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip, in xen_bind_pirq_gsi_to_irq()
1077 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip, in xen_bind_pirq_gsi_to_irq()
1080 ret = info->irq; in xen_bind_pirq_gsi_to_irq()
1097 WARN_ONCE(rc == -ENOSYS, in xen_allocate_pirq_msi()
1100 return rc ? -1 : op_get_free_pirq.pirq; in xen_allocate_pirq_msi()
1111 irq = irq_alloc_descs(-1, 0, nvec, -1); in xen_bind_pirq_msi_to_irq()
1118 ret = -ENOMEM; in xen_bind_pirq_msi_to_irq()
1138 while (nvec--) { in xen_bind_pirq_msi_to_irq()
1151 int rc = -ENOENT; in xen_destroy_irq()
1160 if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) { in xen_destroy_irq()
1161 unmap_irq.pirq = info->u.pirq.pirq; in xen_destroy_irq()
1162 unmap_irq.domid = info->u.pirq.domid; in xen_destroy_irq()
1168 if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) in xen_destroy_irq()
1170 info->u.pirq.domid, info->u.pirq.pirq); in xen_destroy_irq()
1195 int ret = -ENOMEM; in bind_evtchn_to_irq_chip()
1199 return -ENOMEM; in bind_evtchn_to_irq_chip()
1210 irq_set_chip_and_handler_name(info->irq, chip, in bind_evtchn_to_irq_chip()
1215 __unbind_from_irq(info, info->irq); in bind_evtchn_to_irq_chip()
1226 } else if (!WARN_ON(info->type != IRQT_EVTCHN)) { in bind_evtchn_to_irq_chip()
1227 if (shared && !WARN_ON(info->refcnt < 0)) in bind_evtchn_to_irq_chip()
1228 info->refcnt++; in bind_evtchn_to_irq_chip()
1231 ret = info->irq; in bind_evtchn_to_irq_chip()
1262 if (ret == -1) { in bind_ipi_to_irq()
1267 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip, in bind_ipi_to_irq()
1278 __unbind_from_irq(info, info->irq); in bind_ipi_to_irq()
1286 ret = info->irq; in bind_ipi_to_irq()
1289 WARN_ON(info == NULL || info->type != IRQT_IPI); in bind_ipi_to_irq()
1305 bind_interdomain.remote_dom = dev->otherend_id; in bind_interdomain_evtchn_to_irq_chip()
1327 int rc = -ENOENT; in find_virq()
1347 * xen_evtchn_nr_channels - number of usable event channel ports
1355 return evtchn_ops->nr_channels(); in xen_evtchn_nr_channels()
1370 if (ret == -1) { in bind_virq_to_irq()
1376 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip, in bind_virq_to_irq()
1379 irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip, in bind_virq_to_irq()
1389 if (ret == -EEXIST) in bind_virq_to_irq()
1396 __unbind_from_irq(info, info->irq); in bind_virq_to_irq()
1405 ret = info->irq; in bind_virq_to_irq()
1408 WARN_ON(info == NULL || info->type != IRQT_VIRQ); in bind_virq_to_irq()
1558 * xen_set_irq_priority() - set an event channel priority.
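
A typical call (sketch); note that event channel priorities only take effect when the FIFO-based event channel ABI is in use:

	if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
		pr_warn("xen: cannot raise priority of irq %d\n", irq);
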
1579 return -ENOENT; in evtchn_make_refcounted()
1581 WARN_ON(info->refcnt != -1); in evtchn_make_refcounted()
1583 info->refcnt = 1; in evtchn_make_refcounted()
1584 info->is_static = is_static; in evtchn_make_refcounted()
1593 int err = -ENOENT; in evtchn_get()
1596 return -EINVAL; in evtchn_get()
1605 err = -EINVAL; in evtchn_get()
1606 if (info->refcnt <= 0 || info->refcnt == SHRT_MAX) in evtchn_get()
1609 info->refcnt++; in evtchn_get()
1624 unbind_from_irq(info->irq); in evtchn_put()
1670 if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) { in handle_irq_for_port()
1673 if (!ctrl->timeout) { in handle_irq_for_port()
1676 ctrl->timeout = kt; in handle_irq_for_port()
1677 } else if (kt > ctrl->timeout) { in handle_irq_for_port()
1678 ctrl->defer_eoi = true; in handle_irq_for_port()
1682 if (xchg_acquire(&info->is_active, 1)) in handle_irq_for_port()
1685 dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL; in handle_irq_for_port()
1687 atomic_inc(&dev->events); in handle_irq_for_port()
1689 if (ctrl->defer_eoi) { in handle_irq_for_port()
1690 info->eoi_cpu = smp_processor_id(); in handle_irq_for_port()
1691 info->irq_epoch = __this_cpu_read(irq_epoch); in handle_irq_for_port()
1692 info->eoi_time = get_jiffies_64() + event_eoi_delay; in handle_irq_for_port()
1695 generic_handle_irq(info->irq); in handle_irq_for_port()
1701 int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE; in xen_evtchn_do_upcall()
1715 vcpu_info->evtchn_upcall_pending = 0; in xen_evtchn_do_upcall()
1723 } while (vcpu_info->evtchn_upcall_pending); in xen_evtchn_do_upcall()
1752 /* After resume the irq<->evtchn mappings are all cleared out */ in rebind_evtchn_irq()
1756 BUG_ON(info->type == IRQT_UNBOUND); in rebind_evtchn_irq()
1758 info->irq = irq; in rebind_evtchn_irq()
1763 bind_evtchn_to_cpu(info, info->cpu, false); in rebind_evtchn_irq()
1773 evtchn_port_t evtchn = info ? info->evtchn : 0; in xen_rebind_evtchn_to_cpu()
1776 return -1; in xen_rebind_evtchn_to_cpu()
1779 return -1; in xen_rebind_evtchn_to_cpu()
1794 * it, but don't do the xenlinux-level rebind in that case. in xen_rebind_evtchn_to_cpu()
1838 ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu); in set_affinity_irq()
1847 struct irq_info *info = info_for_irq(data->irq); in enable_dynirq()
1848 evtchn_port_t evtchn = info ? info->evtchn : 0; in enable_dynirq()
1856 evtchn_port_t evtchn = info->evtchn; in do_ack_dynirq()
1864 struct irq_info *info = info_for_irq(data->irq); in ack_dynirq()
1872 struct irq_info *info = info_for_irq(data->irq); in mask_ack_dynirq()
1882 struct irq_info *info = info_for_irq(data->irq); in lateeoi_ack_dynirq()
1883 evtchn_port_t evtchn = info ? info->evtchn : 0; in lateeoi_ack_dynirq()
1889 * Need to keep is_active non-zero in order to ignore re-raised in lateeoi_ack_dynirq()
1898 struct irq_info *info = info_for_irq(data->irq); in lateeoi_mask_ack_dynirq()
1899 evtchn_port_t evtchn = info ? info->evtchn : 0; in lateeoi_mask_ack_dynirq()
1909 struct irq_info *info = info_for_irq(data->irq); in retrigger_dynirq()
1910 evtchn_port_t evtchn = info ? info->evtchn : 0; in retrigger_dynirq()
1929 if (info->type != IRQT_PIRQ) in restore_pirqs()
1932 pirq = info->u.pirq.pirq; in restore_pirqs()
1933 gsi = info->u.pirq.gsi; in restore_pirqs()
1934 irq = info->irq; in restore_pirqs()
1954 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); in restore_pirqs()
1968 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) in restore_cpu_virqs()
1997 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) in restore_cpu_ipis()
2021 evtchn_port_t evtchn = info ? info->evtchn : 0; in xen_clear_irq_pending()
2040 * the irq will be disabled so it won't deliver an interrupt. */
2058 * irq will be disabled so it won't deliver an interrupt. */
2071 return -ENOENT; in xen_test_irq_shared()
2073 irq_status.irq = info->u.pirq.pirq; in xen_test_irq_shared()
2086 /* New event-channel space is not 'live' yet. */ in xen_irq_resume()
2089 /* No IRQ <-> event-channel mappings. */ in xen_irq_resume()
2091 /* Zap event-channel binding */ in xen_irq_resume()
2092 info->evtchn = 0; in xen_irq_resume()
2108 .name = "xen-dyn",
2122 /* The chip name needs to contain "xen-dyn" for irqbalance to work. */
2123 .name = "xen-dyn-lateeoi",
2137 .name = "xen-pirq",
2157 .name = "xen-percpu",
2185 * Set up per-vCPU vector-type callbacks. If this setup is unavailable,
2186 * fall back to the global vector-type callback.
2246 if (evtchn_ops->percpu_init) in xen_evtchn_cpu_prepare()
2247 ret = evtchn_ops->percpu_init(cpu); in xen_evtchn_cpu_prepare()
2256 if (evtchn_ops->percpu_deinit) in xen_evtchn_cpu_dead()
2257 ret = evtchn_ops->percpu_deinit(cpu); in xen_evtchn_cpu_dead()
2264 int ret = -EINVAL; in xen_init_IRQ()