Lines Matching full:desc (all matches below are in kernel/irq/chip.c)
44 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_chip() local
46 if (!desc) in irq_set_chip()
49 desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); in irq_set_chip()
50 irq_put_desc_unlock(desc, flags); in irq_set_chip()
68 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_type() local
71 if (!desc) in irq_set_irq_type()
74 ret = __irq_set_trigger(desc, type); in irq_set_irq_type()
75 irq_put_desc_busunlock(desc, flags); in irq_set_irq_type()
90 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_handler_data() local
92 if (!desc) in irq_set_handler_data()
94 desc->irq_common_data.handler_data = data; in irq_set_handler_data()
95 irq_put_desc_unlock(desc, flags); in irq_set_handler_data()
112 struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_msi_desc_off() local
114 if (!desc) in irq_set_msi_desc_off()
116 desc->irq_common_data.msi_desc = entry; in irq_set_msi_desc_off()
119 irq_put_desc_unlock(desc, flags); in irq_set_msi_desc_off()
145 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_chip_data() local
147 if (!desc) in irq_set_chip_data()
149 desc->irq_data.chip_data = data; in irq_set_chip_data()
150 irq_put_desc_unlock(desc, flags); in irq_set_chip_data()
157 struct irq_desc *desc = irq_to_desc(irq); in irq_get_irq_data() local
159 return desc ? &desc->irq_data : NULL; in irq_get_irq_data()
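The exported per-IRQ setters matched above are normally called together when an interrupt is wired up. A minimal sketch of that driver-side usage follows; example_map_irq and example_chip are hypothetical names, and only the irq_set_* helpers shown in the matches are assumed (from <linux/irq.h>):

	#include <linux/irq.h>

	static struct irq_chip example_chip;	/* hypothetical irq_chip */

	/* Hedged sketch: attach chip, chip_data, handler_data and trigger type
	 * to one Linux IRQ via the helpers whose bodies are matched above. */
	static void example_map_irq(unsigned int virq, void *chip_data)
	{
		irq_set_chip(virq, &example_chip);		/* desc->irq_data.chip */
		irq_set_chip_data(virq, chip_data);		/* desc->irq_data.chip_data */
		irq_set_handler_data(virq, NULL);		/* desc->irq_common_data.handler_data */
		irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);	/* ends up in __irq_set_trigger() */
	}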
163 static void irq_state_clr_disabled(struct irq_desc *desc) in irq_state_clr_disabled() argument
165 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); in irq_state_clr_disabled()
168 static void irq_state_clr_masked(struct irq_desc *desc) in irq_state_clr_masked() argument
170 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); in irq_state_clr_masked()
173 static void irq_state_clr_started(struct irq_desc *desc) in irq_state_clr_started() argument
175 irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_clr_started()
178 static void irq_state_set_started(struct irq_desc *desc) in irq_state_set_started() argument
180 irqd_set(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_set_started()
191 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, in __irq_startup_managed() argument
194 struct irq_data *d = irq_desc_get_irq_data(desc); in __irq_startup_managed()
228 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, in __irq_startup_managed() argument
235 static int __irq_startup(struct irq_desc *desc) in __irq_startup() argument
237 struct irq_data *d = irq_desc_get_irq_data(desc); in __irq_startup()
245 irq_state_clr_disabled(desc); in __irq_startup()
246 irq_state_clr_masked(desc); in __irq_startup()
248 irq_enable(desc); in __irq_startup()
250 irq_state_set_started(desc); in __irq_startup()
254 int irq_startup(struct irq_desc *desc, bool resend, bool force) in irq_startup() argument
256 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_startup()
260 desc->depth = 0; in irq_startup()
263 irq_enable(desc); in irq_startup()
265 switch (__irq_startup_managed(desc, aff, force)) { in irq_startup()
268 irq_setup_affinity(desc); in irq_startup()
269 ret = __irq_startup(desc); in irq_startup()
271 irq_setup_affinity(desc); in irq_startup()
275 ret = __irq_startup(desc); in irq_startup()
283 check_irq_resend(desc, false); in irq_startup()
288 int irq_activate(struct irq_desc *desc) in irq_activate() argument
290 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_activate()
297 int irq_activate_and_startup(struct irq_desc *desc, bool resend) in irq_activate_and_startup() argument
299 if (WARN_ON(irq_activate(desc))) in irq_activate_and_startup()
301 return irq_startup(desc, resend, IRQ_START_FORCE); in irq_activate_and_startup()
304 static void __irq_disable(struct irq_desc *desc, bool mask);
306 void irq_shutdown(struct irq_desc *desc) in irq_shutdown() argument
308 if (irqd_is_started(&desc->irq_data)) { in irq_shutdown()
309 clear_irq_resend(desc); in irq_shutdown()
310 desc->depth = 1; in irq_shutdown()
311 if (desc->irq_data.chip->irq_shutdown) { in irq_shutdown()
312 desc->irq_data.chip->irq_shutdown(&desc->irq_data); in irq_shutdown()
313 irq_state_set_disabled(desc); in irq_shutdown()
314 irq_state_set_masked(desc); in irq_shutdown()
316 __irq_disable(desc, true); in irq_shutdown()
318 irq_state_clr_started(desc); in irq_shutdown()
323 void irq_shutdown_and_deactivate(struct irq_desc *desc) in irq_shutdown_and_deactivate() argument
325 irq_shutdown(desc); in irq_shutdown_and_deactivate()
332 irq_domain_deactivate_irq(&desc->irq_data); in irq_shutdown_and_deactivate()
335 void irq_enable(struct irq_desc *desc) in irq_enable() argument
337 if (!irqd_irq_disabled(&desc->irq_data)) { in irq_enable()
338 unmask_irq(desc); in irq_enable()
340 irq_state_clr_disabled(desc); in irq_enable()
341 if (desc->irq_data.chip->irq_enable) { in irq_enable()
342 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_enable()
343 irq_state_clr_masked(desc); in irq_enable()
345 unmask_irq(desc); in irq_enable()
350 static void __irq_disable(struct irq_desc *desc, bool mask) in __irq_disable() argument
352 if (irqd_irq_disabled(&desc->irq_data)) { in __irq_disable()
354 mask_irq(desc); in __irq_disable()
356 irq_state_set_disabled(desc); in __irq_disable()
357 if (desc->irq_data.chip->irq_disable) { in __irq_disable()
358 desc->irq_data.chip->irq_disable(&desc->irq_data); in __irq_disable()
359 irq_state_set_masked(desc); in __irq_disable()
361 mask_irq(desc); in __irq_disable()
368 * @desc: irq descriptor which should be disabled
386 void irq_disable(struct irq_desc *desc) in irq_disable() argument
388 __irq_disable(desc, irq_settings_disable_unlazy(desc)); in irq_disable()
391 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) in irq_percpu_enable() argument
393 if (desc->irq_data.chip->irq_enable) in irq_percpu_enable()
394 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_percpu_enable()
396 desc->irq_data.chip->irq_unmask(&desc->irq_data); in irq_percpu_enable()
397 cpumask_set_cpu(cpu, desc->percpu_enabled); in irq_percpu_enable()
400 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) in irq_percpu_disable() argument
402 if (desc->irq_data.chip->irq_disable) in irq_percpu_disable()
403 desc->irq_data.chip->irq_disable(&desc->irq_data); in irq_percpu_disable()
405 desc->irq_data.chip->irq_mask(&desc->irq_data); in irq_percpu_disable()
406 cpumask_clear_cpu(cpu, desc->percpu_enabled); in irq_percpu_disable()
409 static inline void mask_ack_irq(struct irq_desc *desc) in mask_ack_irq() argument
411 if (desc->irq_data.chip->irq_mask_ack) { in mask_ack_irq()
412 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); in mask_ack_irq()
413 irq_state_set_masked(desc); in mask_ack_irq()
415 mask_irq(desc); in mask_ack_irq()
416 if (desc->irq_data.chip->irq_ack) in mask_ack_irq()
417 desc->irq_data.chip->irq_ack(&desc->irq_data); in mask_ack_irq()
421 void mask_irq(struct irq_desc *desc) in mask_irq() argument
423 if (irqd_irq_masked(&desc->irq_data)) in mask_irq()
426 if (desc->irq_data.chip->irq_mask) { in mask_irq()
427 desc->irq_data.chip->irq_mask(&desc->irq_data); in mask_irq()
428 irq_state_set_masked(desc); in mask_irq()
432 void unmask_irq(struct irq_desc *desc) in unmask_irq() argument
434 if (!irqd_irq_masked(&desc->irq_data)) in unmask_irq()
437 if (desc->irq_data.chip->irq_unmask) { in unmask_irq()
438 desc->irq_data.chip->irq_unmask(&desc->irq_data); in unmask_irq()
439 irq_state_clr_masked(desc); in unmask_irq()
443 void unmask_threaded_irq(struct irq_desc *desc) in unmask_threaded_irq() argument
445 struct irq_chip *chip = desc->irq_data.chip; in unmask_threaded_irq()
448 chip->irq_eoi(&desc->irq_data); in unmask_threaded_irq()
450 unmask_irq(desc); in unmask_threaded_irq()
463 struct irq_desc *desc = irq_to_desc(irq); in handle_nested_irq() local
469 raw_spin_lock_irq(&desc->lock); in handle_nested_irq()
471 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_nested_irq()
473 action = desc->action; in handle_nested_irq()
474 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { in handle_nested_irq()
475 desc->istate |= IRQS_PENDING; in handle_nested_irq()
476 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
480 kstat_incr_irqs_this_cpu(desc); in handle_nested_irq()
481 atomic_inc(&desc->threads_active); in handle_nested_irq()
482 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
485 for_each_action_of_desc(desc, action) in handle_nested_irq()
488 if (!irq_settings_no_debug(desc)) in handle_nested_irq()
489 note_interrupt(desc, action_ret); in handle_nested_irq()
491 wake_threads_waitq(desc); in handle_nested_irq()
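handle_nested_irq() is intended to be called from the threaded handler of a demultiplexing parent whose children were registered with irq_set_nested_thread(). A hedged sketch of such a caller, with hypothetical example_* names and a made-up status read:

	/* Hedged sketch: dispatch nested child interrupts from a threaded
	 * parent handler; handle_nested_irq() runs the child actions directly. */
	static irqreturn_t example_parent_thread_fn(int irq, void *data)
	{
		struct example_chip *chip = data;			/* hypothetical */
		unsigned long pending = example_read_status(chip);	/* hypothetical register read */
		int bit;

		for_each_set_bit(bit, &pending, chip->nr_irqs)
			handle_nested_irq(irq_find_mapping(chip->domain, bit));

		return IRQ_HANDLED;
	}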
495 static bool irq_check_poll(struct irq_desc *desc) in irq_check_poll() argument
497 if (!(desc->istate & IRQS_POLL_INPROGRESS)) in irq_check_poll()
499 return irq_wait_for_poll(desc); in irq_check_poll()
502 static bool irq_may_run(struct irq_desc *desc) in irq_may_run() argument
510 if (!irqd_has_set(&desc->irq_data, mask)) in irq_may_run()
518 if (irq_pm_check_wakeup(desc)) in irq_may_run()
524 return irq_check_poll(desc); in irq_may_run()
529 * @desc: the interrupt description structure for this irq
538 void handle_simple_irq(struct irq_desc *desc) in handle_simple_irq() argument
540 raw_spin_lock(&desc->lock); in handle_simple_irq()
542 if (!irq_may_run(desc)) in handle_simple_irq()
545 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_simple_irq()
547 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_simple_irq()
548 desc->istate |= IRQS_PENDING; in handle_simple_irq()
552 kstat_incr_irqs_this_cpu(desc); in handle_simple_irq()
553 handle_irq_event(desc); in handle_simple_irq()
556 raw_spin_unlock(&desc->lock); in handle_simple_irq()
562 * @desc: the interrupt description structure for this irq
573 void handle_untracked_irq(struct irq_desc *desc) in handle_untracked_irq() argument
575 raw_spin_lock(&desc->lock); in handle_untracked_irq()
577 if (!irq_may_run(desc)) in handle_untracked_irq()
580 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_untracked_irq()
582 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_untracked_irq()
583 desc->istate |= IRQS_PENDING; in handle_untracked_irq()
587 desc->istate &= ~IRQS_PENDING; in handle_untracked_irq()
588 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
589 raw_spin_unlock(&desc->lock); in handle_untracked_irq()
591 __handle_irq_event_percpu(desc); in handle_untracked_irq()
593 raw_spin_lock(&desc->lock); in handle_untracked_irq()
594 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
597 raw_spin_unlock(&desc->lock); in handle_untracked_irq()
605 static void cond_unmask_irq(struct irq_desc *desc) in cond_unmask_irq() argument
614 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_irq()
615 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) in cond_unmask_irq()
616 unmask_irq(desc); in cond_unmask_irq()
621 * @desc: the interrupt description structure for this irq
628 void handle_level_irq(struct irq_desc *desc) in handle_level_irq() argument
630 raw_spin_lock(&desc->lock); in handle_level_irq()
631 mask_ack_irq(desc); in handle_level_irq()
633 if (!irq_may_run(desc)) in handle_level_irq()
636 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_level_irq()
642 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_level_irq()
643 desc->istate |= IRQS_PENDING; in handle_level_irq()
647 kstat_incr_irqs_this_cpu(desc); in handle_level_irq()
648 handle_irq_event(desc); in handle_level_irq()
650 cond_unmask_irq(desc); in handle_level_irq()
653 raw_spin_unlock(&desc->lock); in handle_level_irq()
657 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) in cond_unmask_eoi_irq() argument
659 if (!(desc->istate & IRQS_ONESHOT)) { in cond_unmask_eoi_irq()
660 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
669 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_eoi_irq()
670 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { in cond_unmask_eoi_irq()
671 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
672 unmask_irq(desc); in cond_unmask_eoi_irq()
674 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
680 * @desc: the interrupt description structure for this irq
687 void handle_fasteoi_irq(struct irq_desc *desc) in handle_fasteoi_irq() argument
689 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_irq()
691 raw_spin_lock(&desc->lock); in handle_fasteoi_irq()
698 if (!irq_may_run(desc)) { in handle_fasteoi_irq()
699 if (irqd_needs_resend_when_in_progress(&desc->irq_data)) in handle_fasteoi_irq()
700 desc->istate |= IRQS_PENDING; in handle_fasteoi_irq()
704 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_irq()
710 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_irq()
711 desc->istate |= IRQS_PENDING; in handle_fasteoi_irq()
712 mask_irq(desc); in handle_fasteoi_irq()
716 kstat_incr_irqs_this_cpu(desc); in handle_fasteoi_irq()
717 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_irq()
718 mask_irq(desc); in handle_fasteoi_irq()
720 handle_irq_event(desc); in handle_fasteoi_irq()
722 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_irq()
727 if (unlikely(desc->istate & IRQS_PENDING)) in handle_fasteoi_irq()
728 check_irq_resend(desc, false); in handle_fasteoi_irq()
730 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
734 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_irq()
735 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
741 * @desc: the interrupt description structure for this irq
751 void handle_fasteoi_nmi(struct irq_desc *desc) in handle_fasteoi_nmi() argument
753 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_fasteoi_nmi()
754 struct irqaction *action = desc->action; in handle_fasteoi_nmi()
755 unsigned int irq = irq_desc_get_irq(desc); in handle_fasteoi_nmi()
758 __kstat_incr_irqs_this_cpu(desc); in handle_fasteoi_nmi()
768 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_nmi()
774 * @desc: the interrupt description structure for this irq
787 void handle_edge_irq(struct irq_desc *desc) in handle_edge_irq() argument
789 raw_spin_lock(&desc->lock); in handle_edge_irq()
791 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_irq()
793 if (!irq_may_run(desc)) { in handle_edge_irq()
794 desc->istate |= IRQS_PENDING; in handle_edge_irq()
795 mask_ack_irq(desc); in handle_edge_irq()
803 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_irq()
804 desc->istate |= IRQS_PENDING; in handle_edge_irq()
805 mask_ack_irq(desc); in handle_edge_irq()
809 kstat_incr_irqs_this_cpu(desc); in handle_edge_irq()
812 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_edge_irq()
815 if (unlikely(!desc->action)) { in handle_edge_irq()
816 mask_irq(desc); in handle_edge_irq()
825 if (unlikely(desc->istate & IRQS_PENDING)) { in handle_edge_irq()
826 if (!irqd_irq_disabled(&desc->irq_data) && in handle_edge_irq()
827 irqd_irq_masked(&desc->irq_data)) in handle_edge_irq()
828 unmask_irq(desc); in handle_edge_irq()
831 handle_irq_event(desc); in handle_edge_irq()
833 } while ((desc->istate & IRQS_PENDING) && in handle_edge_irq()
834 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_irq()
837 raw_spin_unlock(&desc->lock); in handle_edge_irq()
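handle_level_irq() and handle_edge_irq() are installed per interrupt by an irqchip driver, typically from its irq_domain map callback. A hedged sketch of that selection, assuming the standard irq_set_chip_and_handler() helper; the example_* names and the trigger query are hypothetical:

	static struct irq_chip example_chip;	/* hypothetical */

	/* Hedged sketch: pick the flow handler at mapping time based on the
	 * hardware trigger type. */
	static int example_domain_map(struct irq_domain *d, unsigned int virq,
				      irq_hw_number_t hwirq)
	{
		if (example_hwirq_is_level(hwirq))	/* hypothetical query */
			irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
		else
			irq_set_chip_and_handler(virq, &example_chip, handle_edge_irq);

		irq_set_chip_data(virq, d->host_data);
		return 0;
	}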
844 * @desc: the interrupt description structure for this irq
849 void handle_edge_eoi_irq(struct irq_desc *desc) in handle_edge_eoi_irq() argument
851 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_edge_eoi_irq()
853 raw_spin_lock(&desc->lock); in handle_edge_eoi_irq()
855 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_eoi_irq()
857 if (!irq_may_run(desc)) { in handle_edge_eoi_irq()
858 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
866 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_eoi_irq()
867 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
871 kstat_incr_irqs_this_cpu(desc); in handle_edge_eoi_irq()
874 if (unlikely(!desc->action)) in handle_edge_eoi_irq()
877 handle_irq_event(desc); in handle_edge_eoi_irq()
879 } while ((desc->istate & IRQS_PENDING) && in handle_edge_eoi_irq()
880 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_eoi_irq()
883 chip->irq_eoi(&desc->irq_data); in handle_edge_eoi_irq()
884 raw_spin_unlock(&desc->lock); in handle_edge_eoi_irq()
890 * @desc: the interrupt description structure for this irq
894 void handle_percpu_irq(struct irq_desc *desc) in handle_percpu_irq() argument
896 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_irq()
900 * desc->tot_count. in handle_percpu_irq()
902 __kstat_incr_irqs_this_cpu(desc); in handle_percpu_irq()
905 chip->irq_ack(&desc->irq_data); in handle_percpu_irq()
907 handle_irq_event_percpu(desc); in handle_percpu_irq()
910 chip->irq_eoi(&desc->irq_data); in handle_percpu_irq()
915 * @desc: the interrupt description structure for this irq
924 void handle_percpu_devid_irq(struct irq_desc *desc) in handle_percpu_devid_irq() argument
926 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_irq()
927 struct irqaction *action = desc->action; in handle_percpu_devid_irq()
928 unsigned int irq = irq_desc_get_irq(desc); in handle_percpu_devid_irq()
933 * desc->tot_count. in handle_percpu_devid_irq()
935 __kstat_incr_irqs_this_cpu(desc); in handle_percpu_devid_irq()
938 chip->irq_ack(&desc->irq_data); in handle_percpu_devid_irq()
946 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in handle_percpu_devid_irq()
949 irq_percpu_disable(desc, cpu); in handle_percpu_devid_irq()
956 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_irq()
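handle_percpu_devid_irq() pairs with per-CPU request and enable on the driver side. A minimal sketch under that assumption; example_setup_ppi, example_ppi_handler and struct example_evt are hypothetical:

	static irqreturn_t example_ppi_handler(int irq, void *dev_id);	/* hypothetical */

	/* Hedged sketch: register a per-CPU dev_id and enable the interrupt;
	 * enable_percpu_irq() must run on every CPU that should receive it. */
	static int example_setup_ppi(unsigned int irq, struct example_evt __percpu *evt)
	{
		int err;

		err = request_percpu_irq(irq, example_ppi_handler, "example-ppi", evt);
		if (err)
			return err;

		enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* on each target CPU */
		return 0;
	}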
962 * @desc: the interrupt description structure for this irq
967 void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc) in handle_percpu_devid_fasteoi_nmi() argument
969 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_fasteoi_nmi()
970 struct irqaction *action = desc->action; in handle_percpu_devid_fasteoi_nmi()
971 unsigned int irq = irq_desc_get_irq(desc); in handle_percpu_devid_fasteoi_nmi()
974 __kstat_incr_irqs_this_cpu(desc); in handle_percpu_devid_fasteoi_nmi()
981 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_fasteoi_nmi()
985 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, in __irq_do_set_handler() argument
991 struct irq_data *irq_data = &desc->irq_data; in __irq_do_set_handler()
1020 if (desc->irq_data.chip != &no_irq_chip) in __irq_do_set_handler()
1021 mask_ack_irq(desc); in __irq_do_set_handler()
1022 irq_state_set_disabled(desc); in __irq_do_set_handler()
1024 desc->action = NULL; in __irq_do_set_handler()
1025 WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc))); in __irq_do_set_handler()
1027 desc->depth = 1; in __irq_do_set_handler()
1029 desc->handle_irq = handle; in __irq_do_set_handler()
1030 desc->name = name; in __irq_do_set_handler()
1033 unsigned int type = irqd_get_trigger_type(&desc->irq_data); in __irq_do_set_handler()
1044 __irq_set_trigger(desc, type); in __irq_do_set_handler()
1045 desc->handle_irq = handle; in __irq_do_set_handler()
1048 irq_settings_set_noprobe(desc); in __irq_do_set_handler()
1049 irq_settings_set_norequest(desc); in __irq_do_set_handler()
1050 irq_settings_set_nothread(desc); in __irq_do_set_handler()
1051 desc->action = &chained_action; in __irq_do_set_handler()
1052 WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc))); in __irq_do_set_handler()
1053 irq_activate_and_startup(desc, IRQ_RESEND); in __irq_do_set_handler()
1062 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); in __irq_set_handler() local
1064 if (!desc) in __irq_set_handler()
1067 __irq_do_set_handler(desc, handle, is_chained, name); in __irq_set_handler()
1068 irq_put_desc_busunlock(desc, flags); in __irq_set_handler()
1077 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_chained_handler_and_data() local
1079 if (!desc) in irq_set_chained_handler_and_data()
1082 desc->irq_common_data.handler_data = data; in irq_set_chained_handler_and_data()
1083 __irq_do_set_handler(desc, handle, 1, NULL); in irq_set_chained_handler_and_data()
1085 irq_put_desc_busunlock(desc, flags); in irq_set_chained_handler_and_data()
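irq_set_chained_handler_and_data() is how demultiplexing drivers install a flow handler plus handler_data on their parent line; the chained handler has no irqaction and runs in hard-IRQ context. A hedged sketch with hypothetical example_gpio names, assuming <linux/irqchip/chained_irq.h>:

	/* Hedged sketch: demultiplex child interrupts between
	 * chained_irq_enter()/chained_irq_exit(). */
	static void example_gpio_parent_handler(struct irq_desc *desc)
	{
		struct example_gpio *gpio = irq_desc_get_handler_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned long pending;
		int bit;

		chained_irq_enter(chip, desc);
		pending = example_gpio_read_pending(gpio);	/* hypothetical */
		for_each_set_bit(bit, &pending, 32)
			generic_handle_domain_irq(gpio->domain, bit);
		chained_irq_exit(chip, desc);
	}

	static void example_gpio_setup(unsigned int parent_irq, struct example_gpio *gpio)
	{
		irq_set_chained_handler_and_data(parent_irq,
						 example_gpio_parent_handler, gpio);
	}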
1101 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_modify_status() local
1103 if (!desc) in irq_modify_status()
1110 WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); in irq_modify_status()
1112 irq_settings_clr_and_set(desc, clr, set); in irq_modify_status()
1114 trigger = irqd_get_trigger_type(&desc->irq_data); in irq_modify_status()
1116 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | in irq_modify_status()
1118 if (irq_settings_has_no_balance_set(desc)) in irq_modify_status()
1119 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in irq_modify_status()
1120 if (irq_settings_is_per_cpu(desc)) in irq_modify_status()
1121 irqd_set(&desc->irq_data, IRQD_PER_CPU); in irq_modify_status()
1122 if (irq_settings_can_move_pcntxt(desc)) in irq_modify_status()
1123 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); in irq_modify_status()
1124 if (irq_settings_is_level(desc)) in irq_modify_status()
1125 irqd_set(&desc->irq_data, IRQD_LEVEL); in irq_modify_status()
1127 tmp = irq_settings_get_trigger_mask(desc); in irq_modify_status()
1131 irqd_set(&desc->irq_data, trigger); in irq_modify_status()
1133 irq_put_desc_unlock(desc, flags); in irq_modify_status()
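Drivers rarely call irq_modify_status() directly; they use the irq_set_status_flags()/irq_clear_status_flags() wrappers built on it. A small hedged sketch, with hypothetical virq/example_* names, consistent with the WARN_ON above that _IRQ_NOAUTOEN must be set before the interrupt is started:

	/* Hedged sketch: keep the IRQ from being auto-enabled at request time,
	 * then do the first enable explicitly once the device is ready. */
	static int example_late_enable(unsigned int virq, struct example_dev *edev)
	{
		int err;

		/* irq_set_status_flags() wraps irq_modify_status(virq, 0, IRQ_NOAUTOEN) */
		irq_set_status_flags(virq, IRQ_NOAUTOEN);

		err = request_irq(virq, example_isr, 0, "example", edev);
		if (err)
			return err;

		/* ... program the device ..., then enable the line */
		enable_irq(virq);
		return 0;
	}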
1146 struct irq_desc *desc; in irq_cpu_online() local
1152 desc = irq_to_desc(irq); in irq_cpu_online()
1153 if (!desc) in irq_cpu_online()
1156 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_online()
1158 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_online()
1161 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_online()
1162 chip->irq_cpu_online(&desc->irq_data); in irq_cpu_online()
1164 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_online()
1176 struct irq_desc *desc; in irq_cpu_offline() local
1182 desc = irq_to_desc(irq); in irq_cpu_offline()
1183 if (!desc) in irq_cpu_offline()
1186 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_offline()
1188 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_offline()
1191 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_offline()
1192 chip->irq_cpu_offline(&desc->irq_data); in irq_cpu_offline()
1194 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_offline()
1206 * @desc: the interrupt description structure for this irq
1212 void handle_fasteoi_ack_irq(struct irq_desc *desc) in handle_fasteoi_ack_irq() argument
1214 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_ack_irq()
1216 raw_spin_lock(&desc->lock); in handle_fasteoi_ack_irq()
1218 if (!irq_may_run(desc)) in handle_fasteoi_ack_irq()
1221 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_ack_irq()
1227 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_ack_irq()
1228 desc->istate |= IRQS_PENDING; in handle_fasteoi_ack_irq()
1229 mask_irq(desc); in handle_fasteoi_ack_irq()
1233 kstat_incr_irqs_this_cpu(desc); in handle_fasteoi_ack_irq()
1234 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_ack_irq()
1235 mask_irq(desc); in handle_fasteoi_ack_irq()
1238 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_fasteoi_ack_irq()
1240 handle_irq_event(desc); in handle_fasteoi_ack_irq()
1242 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_ack_irq()
1244 raw_spin_unlock(&desc->lock); in handle_fasteoi_ack_irq()
1248 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_ack_irq()
1249 raw_spin_unlock(&desc->lock); in handle_fasteoi_ack_irq()
1257 * @desc: the interrupt description structure for this irq
1263 void handle_fasteoi_mask_irq(struct irq_desc *desc) in handle_fasteoi_mask_irq() argument
1265 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_mask_irq()
1267 raw_spin_lock(&desc->lock); in handle_fasteoi_mask_irq()
1268 mask_ack_irq(desc); in handle_fasteoi_mask_irq()
1270 if (!irq_may_run(desc)) in handle_fasteoi_mask_irq()
1273 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_mask_irq()
1279 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_mask_irq()
1280 desc->istate |= IRQS_PENDING; in handle_fasteoi_mask_irq()
1281 mask_irq(desc); in handle_fasteoi_mask_irq()
1285 kstat_incr_irqs_this_cpu(desc); in handle_fasteoi_mask_irq()
1286 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_mask_irq()
1287 mask_irq(desc); in handle_fasteoi_mask_irq()
1289 handle_irq_event(desc); in handle_fasteoi_mask_irq()
1291 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_mask_irq()
1293 raw_spin_unlock(&desc->lock); in handle_fasteoi_mask_irq()
1297 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_mask_irq()
1298 raw_spin_unlock(&desc->lock); in handle_fasteoi_mask_irq()