Lines Matching refs:evt
208 static int xen_timerop_shutdown(struct clock_event_device *evt) in xen_timerop_shutdown() argument
217 struct clock_event_device *evt) in xen_timerop_set_next_event() argument
219 WARN_ON(!clockevent_state_oneshot(evt)); in xen_timerop_set_next_event()
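
The hits above (file lines 208-219) match the timer-op flavour of the Xen clockevent callbacks; the listing as a whole follows the layout of arch/x86/xen/time.c in mainline Linux. A minimal sketch of how those matched lines sit inside their functions, assuming that layout; HYPERVISOR_set_timer_op() and the get_abs_timeout() helper do not appear in the hits and are taken on that assumption:

    static int xen_timerop_shutdown(struct clock_event_device *evt)
    {
            /* Cancel any pending timeout against the hypervisor. */
            HYPERVISOR_set_timer_op(0);

            return 0;
    }

    static int xen_timerop_set_next_event(unsigned long delta,
                                          struct clock_event_device *evt)
    {
            WARN_ON(!clockevent_state_oneshot(evt));

            /* Convert the relative delta into an absolute deadline and arm
             * the single-shot hypervisor timer. */
            if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
                    BUG();

            /* A missed deadline still fires as soon as events are delivered. */
            return 0;
    }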
248 static int xen_vcpuop_shutdown(struct clock_event_device *evt) in xen_vcpuop_shutdown() argument
261 static int xen_vcpuop_set_oneshot(struct clock_event_device *evt) in xen_vcpuop_set_oneshot() argument
273 struct clock_event_device *evt) in xen_vcpuop_set_next_event() argument
279 WARN_ON(!clockevent_state_oneshot(evt)); in xen_vcpuop_set_next_event()
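
File lines 248-279 are the VCPUOP-based equivalents: shutdown stops both the single-shot and periodic per-vCPU timers, set_oneshot stops only the periodic one, and set_next_event arms a single-shot timer through a hypercall. A sketch of the set_next_event path, assuming the mainline shape; struct vcpu_set_singleshot_timer, VCPUOP_set_singleshot_timer, smp_processor_id() and xen_vcpu_nr() are not in the hits, and whether the vCPU id goes through xen_vcpu_nr() depends on the kernel version:

    static int xen_vcpuop_set_next_event(unsigned long delta,
                                         struct clock_event_device *evt)
    {
            int cpu = smp_processor_id();
            struct vcpu_set_singleshot_timer single;
            int ret;

            WARN_ON(!clockevent_state_oneshot(evt));

            single.timeout_abs_ns = get_abs_timeout(delta);
            /* No flags: the event is wanted even if the timeout already passed. */
            single.flags = 0;

            ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
                                     &single);
            BUG_ON(ret != 0);

            return ret;
    }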
314 struct clock_event_device evt; member
317 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
321 struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt); in xen_timer_interrupt() local
325 if (evt->event_handler) { in xen_timer_interrupt()
326 evt->event_handler(evt); in xen_timer_interrupt()
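
File line 314 is the evt member of the per-CPU wrapper, line 317 its definition with irq preset to -1 so "no IRQ bound yet" is distinguishable, and lines 321-326 the timer interrupt that forwards the tick into whatever handler the clockevent core installed. A sketch reconstructed from those hits; the name[] field of the wrapper and the IRQ_NONE/IRQ_HANDLED return values are assumptions taken from the mainline file:

    struct xen_clock_event_device {
            struct clock_event_device evt;
            char name[16];
    };

    static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

    static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
    {
            struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
            irqreturn_t ret;

            ret = IRQ_NONE;
            if (evt->event_handler) {
                    /* Hand the tick to the handler the clockevent core set up. */
                    evt->event_handler(evt);
                    ret = IRQ_HANDLED;
            }

            return ret;
    }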
335 struct clock_event_device *evt; in xen_teardown_timer() local
336 evt = &per_cpu(xen_clock_events, cpu).evt; in xen_teardown_timer()
338 if (evt->irq >= 0) { in xen_teardown_timer()
339 unbind_from_irqhandler(evt->irq, NULL); in xen_teardown_timer()
340 evt->irq = -1; in xen_teardown_timer()
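
File lines 335-340 make up the teardown path: the per-CPU device's IRQ is unbound and the -1 sentinel from the initializer is restored. A sketch assembled almost entirely from the hits above; only the function signature and the blank line are filled in:

    void xen_teardown_timer(int cpu)
    {
            struct clock_event_device *evt;

            evt = &per_cpu(xen_clock_events, cpu).evt;

            if (evt->irq >= 0) {
                    /* Release the VIRQ binding and mark the slot free again. */
                    unbind_from_irqhandler(evt->irq, NULL);
                    evt->irq = -1;
            }
    }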
347 struct clock_event_device *evt = &xevt->evt; in xen_setup_timer() local
350 WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); in xen_setup_timer()
351 if (evt->irq >= 0) in xen_setup_timer()
364 memcpy(evt, xen_clockevent, sizeof(*evt)); in xen_setup_timer()
366 evt->cpumask = cpumask_of(cpu); in xen_setup_timer()
367 evt->irq = irq; in xen_setup_timer()
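
File lines 347-367 belong to the per-CPU setup routine: warn (and tear down) if an IRQ is already bound, bind the timer VIRQ, copy the chosen clockevent template into the per-CPU slot, then pin it to the CPU and record the IRQ. A sketch assuming the mainline shape; the printk/snprintf lines, bind_virq_to_irqhandler(), VIRQ_TIMER, the exact IRQF_* flag set and the xen_clockevent template pointer are not in the hits and vary across kernel versions:

    void xen_setup_timer(int cpu)
    {
            struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
            struct clock_event_device *evt = &xevt->evt;
            int irq;

            WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n",
                 evt->irq, cpu);
            if (evt->irq >= 0)
                    xen_teardown_timer(cpu);

            printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

            snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);

            irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                          IRQF_PERCPU | IRQF_NOBALANCING |
                                          IRQF_TIMER | IRQF_FORCE_RESUME,
                                          xevt->name, NULL);

            /* Start from the template (timerop or vcpuop clockevent), then
             * pin the device to this CPU and remember its IRQ. */
            memcpy(evt, xen_clockevent, sizeof(*evt));

            evt->cpumask = cpumask_of(cpu);
            evt->irq = irq;
    }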
373 clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); in xen_setup_cpu_clockevents()
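
The final hit (file line 373) is essentially the whole body of the registration helper; only the signature is assumed here:

    void xen_setup_cpu_clockevents(void)
    {
            clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
    }

Registration is what installs evt->event_handler, which is exactly the pointer xen_timer_interrupt() checks before forwarding the tick.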