Lines matching refs:desc — cross-reference listing of struct irq_desc usage in kernel/irq/manage.c. The leading number on each entry is that line's position in the source file, the trailing "in function()" names the enclosing function, and the "argument"/"local" tags record how desc enters that function's scope.
38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) in __synchronize_hardirq() argument
40 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
50 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
54 raw_spin_lock_irqsave(&desc->lock, flags); in __synchronize_hardirq()
55 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
70 raw_spin_unlock_irqrestore(&desc->lock, flags); in __synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq() local
102 if (desc) { in synchronize_hardirq()
103 __synchronize_hardirq(desc, false); in synchronize_hardirq()
104 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
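Note the split between the two helpers: __synchronize_hardirq() spin-waits while IRQD_IRQ_INPROGRESS is set, and the public synchronize_hardirq() additionally reports, via its return value, whether threaded handlers are still active (line 104 above). A minimal caller sketch, assuming only <linux/interrupt.h>:

        /*
         * Waits for the hard-IRQ part only. A false return means a
         * threaded handler is still running, so resources it touches
         * must not be freed yet.
         */
        if (!synchronize_hardirq(irq)) {
                /* irq thread still active; fall back to synchronize_irq() */
        }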
111 static void __synchronize_irq(struct irq_desc *desc) in __synchronize_irq() argument
113 __synchronize_hardirq(desc, true); in __synchronize_irq()
118 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); in __synchronize_irq()
138 struct irq_desc *desc = irq_to_desc(irq); in synchronize_irq() local
140 if (desc) in synchronize_irq()
141 __synchronize_irq(desc); in synchronize_irq()
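synchronize_irq() combines both waits: the chip-level sync plus sleeping on desc->wait_for_threads until threads_active reaches zero. It can sleep, so it must not be called from the interrupt's own handler, where it would wait for itself. A hedged teardown sketch; mydev, its fields, and MYDEV_IRQ_EN are illustrative names, not from the source:

        static void mydev_quiesce(struct mydev *md)
        {
                writel(0, md->regs + MYDEV_IRQ_EN); /* hypothetical: stop the source */
                synchronize_irq(md->irq);           /* drain hard and threaded handlers */
                /* now safe to free data the handlers reference */
        }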
148 static bool __irq_can_set_affinity(struct irq_desc *desc) in __irq_can_set_affinity() argument
150 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
151 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
175 struct irq_desc *desc = irq_to_desc(irq); in irq_can_set_affinity_usr() local
177 return __irq_can_set_affinity(desc) && in irq_can_set_affinity_usr()
178 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
190 void irq_set_thread_affinity(struct irq_desc *desc) in irq_set_thread_affinity() argument
194 for_each_action_of_desc(desc, action) { in irq_set_thread_affinity()
227 struct irq_desc *desc = irq_data_to_desc(data); in irq_do_set_affinity() local
285 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
289 irq_set_thread_affinity(desc); in irq_do_set_affinity()
300 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_pending() local
303 irq_copy_pending(desc, dest); in irq_set_affinity_pending()
332 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_deactivated() local
347 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_set_affinity_deactivated()
357 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_locked() local
370 irq_copy_pending(desc, mask); in irq_set_affinity_locked()
373 if (desc->affinity_notify) { in irq_set_affinity_locked()
374 kref_get(&desc->affinity_notify->kref); in irq_set_affinity_locked()
375 if (!schedule_work(&desc->affinity_notify->work)) { in irq_set_affinity_locked()
377 kref_put(&desc->affinity_notify->kref, in irq_set_affinity_locked()
378 desc->affinity_notify->release); in irq_set_affinity_locked()
404 struct irq_desc *desc; in irq_update_affinity_desc() local
416 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_update_affinity_desc()
417 if (!desc) in irq_update_affinity_desc()
421 if (irqd_is_started(&desc->irq_data)) { in irq_update_affinity_desc()
427 if (irqd_affinity_is_managed(&desc->irq_data)) { in irq_update_affinity_desc()
436 activated = irqd_is_activated(&desc->irq_data); in irq_update_affinity_desc()
438 irq_domain_deactivate_irq(&desc->irq_data); in irq_update_affinity_desc()
441 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); in irq_update_affinity_desc()
442 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); in irq_update_affinity_desc()
445 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); in irq_update_affinity_desc()
449 irq_domain_activate_irq(&desc->irq_data, false); in irq_update_affinity_desc()
452 irq_put_desc_busunlock(desc, flags); in irq_update_affinity_desc()
459 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_affinity() local
463 if (!desc) in __irq_set_affinity()
466 raw_spin_lock_irqsave(&desc->lock, flags); in __irq_set_affinity()
467 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); in __irq_set_affinity()
468 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_set_affinity()
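__irq_set_affinity() is the common backend of the exported irq_set_affinity() (force == false) and irq_force_affinity() (force == true) wrappers. A driver-side sketch pinning an interrupt to one CPU; the CPU number is arbitrary:

        int err = irq_set_affinity(irq, cpumask_of(2)); /* steer to CPU 2 */

        if (err)
                pr_warn("IRQ%u affinity update failed: %d\n", irq, err);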
506 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __irq_apply_affinity_hint() local
508 if (!desc) in __irq_apply_affinity_hint()
510 desc->affinity_hint = m; in __irq_apply_affinity_hint()
511 irq_put_desc_unlock(desc, flags); in __irq_apply_affinity_hint()
522 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify() local
526 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) in irq_affinity_notify()
529 raw_spin_lock_irqsave(&desc->lock, flags); in irq_affinity_notify()
530 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
531 irq_get_pending(cpumask, desc); in irq_affinity_notify()
533 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify()
534 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_affinity_notify()
557 struct irq_desc *desc = irq_to_desc(irq); in irq_set_affinity_notifier() local
564 if (!desc || irq_is_nmi(desc)) in irq_set_affinity_notifier()
574 raw_spin_lock_irqsave(&desc->lock, flags); in irq_set_affinity_notifier()
575 old_notify = desc->affinity_notify; in irq_set_affinity_notifier()
576 desc->affinity_notify = notify; in irq_set_affinity_notifier()
577 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_set_affinity_notifier()
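The notifier stored here is what irq_affinity_notify() above invokes from a workqueue once the affinity actually changes; because irq_set_affinity_locked() takes and drops kref references, a release() callback is mandatory. A registration sketch with illustrative mydrv_* names:

        static void mydrv_affinity_notify(struct irq_affinity_notify *notify,
                                          const cpumask_t *mask)
        {
                /* re-steer per-CPU resources to follow the new mask */
        }

        static void mydrv_affinity_release(struct kref *ref)
        {
                /* last reference dropped; nothing dynamic in this sketch */
        }

        static struct irq_affinity_notify mydrv_notify = {
                .notify  = mydrv_affinity_notify,
                .release = mydrv_affinity_release,
        };

        irq_set_affinity_notifier(irq, &mydrv_notify);  /* register */
        irq_set_affinity_notifier(irq, NULL);           /* unregister before free_irq() */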
595 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
598 int ret, node = irq_desc_get_node(desc); in irq_setup_affinity()
603 if (!__irq_can_set_affinity(desc)) in irq_setup_affinity()
611 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
612 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
613 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity()
615 set = desc->irq_common_data.affinity; in irq_setup_affinity()
617 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
631 ret = irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
637 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
639 return irq_select_affinity(irq_desc_get_irq(desc)); in irq_setup_affinity()
659 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_vcpu_affinity() local
664 if (!desc) in irq_set_vcpu_affinity()
667 data = irq_desc_get_irq_data(desc); in irq_set_vcpu_affinity()
681 irq_put_desc_unlock(desc, flags); in irq_set_vcpu_affinity()
687 void __disable_irq(struct irq_desc *desc) in __disable_irq() argument
689 if (!desc->depth++) in __disable_irq()
690 irq_disable(desc); in __disable_irq()
696 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __disable_irq_nosync() local
698 if (!desc) in __disable_irq_nosync()
700 __disable_irq(desc); in __disable_irq_nosync()
701 irq_put_desc_busunlock(desc, flags); in __disable_irq_nosync()
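__disable_irq_nosync() backs disable_irq_nosync(): it bumps desc->depth and masks the line without waiting for running handlers, which makes it the only safe disable variant inside the interrupt's own handler. Sketch; mydev_work is a hypothetical work item declared elsewhere:

        static irqreturn_t mydev_irq(int irq, void *data)
        {
                disable_irq_nosync(irq);        /* mask; returns immediately */
                schedule_work(&mydev_work);     /* finish in process context */
                return IRQ_HANDLED;
        }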
785 void __enable_irq(struct irq_desc *desc) in __enable_irq() argument
787 switch (desc->depth) { in __enable_irq()
791 irq_desc_get_irq(desc)); in __enable_irq()
794 if (desc->istate & IRQS_SUSPENDED) in __enable_irq()
797 irq_settings_set_noprobe(desc); in __enable_irq()
809 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); in __enable_irq()
813 desc->depth--; in __enable_irq()
831 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in enable_irq() local
833 if (!desc) in enable_irq()
835 if (WARN(!desc->irq_data.chip, in enable_irq()
839 __enable_irq(desc); in enable_irq()
841 irq_put_desc_busunlock(desc, flags); in enable_irq()
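desc->depth makes disabling reference counted: only the 1 -> 0 transition in __enable_irq() touches the hardware again, and the WARN above catches unbalanced enable_irq() calls. In effect:

        disable_irq(irq);   /* depth 0 -> 1: masked, running handlers drained */
        disable_irq(irq);   /* depth 1 -> 2: pure bookkeeping */
        enable_irq(irq);    /* depth 2 -> 1: still masked */
        enable_irq(irq);    /* depth 1 -> 0: unmasked; a pending edge may be resent */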
861 struct irq_desc *desc = irq_to_desc(irq); in set_irq_wake_real() local
864 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) in set_irq_wake_real()
867 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
868 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
895 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_wake() local
898 if (!desc) in irq_set_irq_wake()
902 if (irq_is_nmi(desc)) { in irq_set_irq_wake()
911 if (desc->wake_depth++ == 0) { in irq_set_irq_wake()
914 desc->wake_depth = 0; in irq_set_irq_wake()
916 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
919 if (desc->wake_depth == 0) { in irq_set_irq_wake()
921 } else if (--desc->wake_depth == 0) { in irq_set_irq_wake()
924 desc->wake_depth = 1; in irq_set_irq_wake()
926 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
931 irq_put_desc_busunlock(desc, flags); in irq_set_irq_wake()
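wake_depth gives irq_set_irq_wake() the same nesting semantics; drivers normally reach it through the enable_irq_wake()/disable_irq_wake() wrappers from their PM callbacks. A sketch, with mydev and its drvdata layout as illustrative assumptions:

        static int mydev_suspend(struct device *dev)
        {
                struct mydev *md = dev_get_drvdata(dev);

                if (device_may_wakeup(dev))
                        enable_irq_wake(md->irq);   /* irq_set_irq_wake(irq, 1) */
                return 0;
        }

        static int mydev_resume(struct device *dev)
        {
                struct mydev *md = dev_get_drvdata(dev);

                if (device_may_wakeup(dev))
                        disable_irq_wake(md->irq);
                return 0;
        }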
944 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in can_request_irq() local
947 if (!desc) in can_request_irq()
950 if (irq_settings_can_request(desc)) { in can_request_irq()
951 if (!desc->action || in can_request_irq()
952 irqflags & desc->action->flags & IRQF_SHARED) in can_request_irq()
955 irq_put_desc_unlock(desc, flags); in can_request_irq()
959 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) in __irq_set_trigger() argument
961 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
970 irq_desc_get_irq(desc), in __irq_set_trigger()
976 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
977 mask_irq(desc); in __irq_set_trigger()
978 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
984 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
989 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
990 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
994 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
995 irq_settings_set_trigger_mask(desc, flags); in __irq_set_trigger()
996 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
997 irq_settings_clr_level(desc); in __irq_set_trigger()
999 irq_settings_set_level(desc); in __irq_set_trigger()
1000 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
1007 flags, irq_desc_get_irq(desc), chip->irq_set_type); in __irq_set_trigger()
1010 unmask_irq(desc); in __irq_set_trigger()
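__irq_set_trigger() runs with desc->lock held and is reached both from __setup_irq(), for IRQF_TRIGGER_* request flags, and from the irq_set_irq_type() API. The driver-visible equivalents, as a sketch (mydev_irq and md are illustrative):

        /* explicit: */
        err = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);

        /* or implicit, via trigger flags at request time: */
        err = request_irq(irq, mydev_irq, IRQF_TRIGGER_RISING, "mydev", md);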
1018 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_parent() local
1020 if (!desc) in irq_set_parent()
1023 desc->parent_irq = parent_irq; in irq_set_parent()
1025 irq_put_desc_unlock(desc, flags); in irq_set_parent()
1061 static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) in irq_thread_check_affinity() argument
1080 raw_spin_lock_irq(&desc->lock); in irq_thread_check_affinity()
1085 if (cpumask_available(desc->irq_common_data.affinity)) { in irq_thread_check_affinity()
1088 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
1092 raw_spin_unlock_irq(&desc->lock); in irq_thread_check_affinity()
1099 static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } in irq_thread_check_affinity() argument
1102 static int irq_wait_for_interrupt(struct irq_desc *desc, in irq_wait_for_interrupt() argument
1107 irq_thread_check_affinity(desc, action); in irq_wait_for_interrupt()
1134 static void irq_finalize_oneshot(struct irq_desc *desc, in irq_finalize_oneshot() argument
1137 if (!(desc->istate & IRQS_ONESHOT) || in irq_finalize_oneshot()
1141 chip_bus_lock(desc); in irq_finalize_oneshot()
1142 raw_spin_lock_irq(&desc->lock); in irq_finalize_oneshot()
1158 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
1159 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1160 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1173 desc->threads_oneshot &= ~action->thread_mask; in irq_finalize_oneshot()
1175 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
1176 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
1177 unmask_threaded_irq(desc); in irq_finalize_oneshot()
1180 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1181 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1191 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) in irq_forced_thread_fn() argument
1200 atomic_inc(&desc->threads_handled); in irq_forced_thread_fn()
1202 irq_finalize_oneshot(desc, action); in irq_forced_thread_fn()
1214 static irqreturn_t irq_thread_fn(struct irq_desc *desc, in irq_thread_fn() argument
1221 atomic_inc(&desc->threads_handled); in irq_thread_fn()
1223 irq_finalize_oneshot(desc, action); in irq_thread_fn()
1227 void wake_threads_waitq(struct irq_desc *desc) in wake_threads_waitq() argument
1229 if (atomic_dec_and_test(&desc->threads_active)) in wake_threads_waitq()
1230 wake_up(&desc->wait_for_threads); in wake_threads_waitq()
1236 struct irq_desc *desc; in irq_thread_dtor() local
1248 desc = irq_to_desc(action->irq); in irq_thread_dtor()
1254 wake_threads_waitq(desc); in irq_thread_dtor()
1257 irq_finalize_oneshot(desc, action); in irq_thread_dtor()
1260 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) in irq_wake_secondary() argument
1267 raw_spin_lock_irq(&desc->lock); in irq_wake_secondary()
1268 __irq_wake_thread(desc, secondary); in irq_wake_secondary()
1269 raw_spin_unlock_irq(&desc->lock); in irq_wake_secondary()
1275 static void irq_thread_set_ready(struct irq_desc *desc, in irq_thread_set_ready() argument
1279 wake_up(&desc->wait_for_threads); in irq_thread_set_ready()
1286 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, in wake_up_and_wait_for_irq_thread_ready() argument
1293 wait_event(desc->wait_for_threads, in wake_up_and_wait_for_irq_thread_ready()
1304 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread() local
1305 irqreturn_t (*handler_fn)(struct irq_desc *desc, in irq_thread()
1308 irq_thread_set_ready(desc, action); in irq_thread()
1321 while (!irq_wait_for_interrupt(desc, action)) { in irq_thread()
1324 action_ret = handler_fn(desc, action); in irq_thread()
1326 irq_wake_secondary(desc, action); in irq_thread()
1328 wake_threads_waitq(desc); in irq_thread()
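irq_thread() is the kthread body behind threaded handlers: it parks in irq_wait_for_interrupt(), runs irq_thread_fn() or irq_forced_thread_fn(), and drops threads_active via wake_threads_waitq(). The handler pair a driver feeds into this machinery looks roughly like the following; the mydev_* helpers are illustrative:

        static irqreturn_t mydev_hardirq(int irq, void *data)
        {
                struct mydev *md = data;

                if (!mydev_irq_pending(md))     /* hypothetical check */
                        return IRQ_NONE;        /* not ours (shared line) */
                mydev_mask_source(md);          /* hypothetical: quiesce device */
                return IRQ_WAKE_THREAD;         /* hand off to the irq thread */
        }

        static irqreturn_t mydev_thread(int irq, void *data)
        {
                /* process context: may sleep, take mutexes, do bus I/O */
                return IRQ_HANDLED;
        }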
1349 struct irq_desc *desc = irq_to_desc(irq); in irq_wake_thread() local
1353 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in irq_wake_thread()
1356 raw_spin_lock_irqsave(&desc->lock, flags); in irq_wake_thread()
1357 for_each_action_of_desc(desc, action) { in irq_wake_thread()
1360 __irq_wake_thread(desc, action); in irq_wake_thread()
1364 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_wake_thread()
1407 static int irq_request_resources(struct irq_desc *desc) in irq_request_resources() argument
1409 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1415 static void irq_release_resources(struct irq_desc *desc) in irq_release_resources() argument
1417 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1424 static bool irq_supports_nmi(struct irq_desc *desc) in irq_supports_nmi() argument
1426 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi()
1440 static int irq_nmi_setup(struct irq_desc *desc) in irq_nmi_setup() argument
1442 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_setup()
1448 static void irq_nmi_teardown(struct irq_desc *desc) in irq_nmi_teardown() argument
1450 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_teardown()
1507 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) in __setup_irq() argument
1513 if (!desc) in __setup_irq()
1516 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1518 if (!try_module_get(desc->owner)) in __setup_irq()
1528 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1534 nested = irq_settings_is_nested_thread(desc); in __setup_irq()
1547 if (irq_settings_can_thread(desc)) { in __setup_irq()
1579 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1589 mutex_lock(&desc->request_mutex); in __setup_irq()
1596 chip_bus_lock(desc); in __setup_irq()
1599 if (!desc->action) { in __setup_irq()
1600 ret = irq_request_resources(desc); in __setup_irq()
1603 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1614 raw_spin_lock_irqsave(&desc->lock, flags); in __setup_irq()
1615 old_ptr = &desc->action; in __setup_irq()
1628 if (irq_is_nmi(desc)) { in __setup_irq()
1630 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1639 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1640 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1643 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1712 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1737 ret = __irq_set_trigger(desc, in __setup_irq()
1755 ret = irq_activate(desc); in __setup_irq()
1759 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ in __setup_irq()
1761 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1764 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1765 irq_settings_set_per_cpu(desc); in __setup_irq()
1767 irq_settings_set_no_debug(desc); in __setup_irq()
1771 irq_settings_set_no_debug(desc); in __setup_irq()
1774 desc->istate |= IRQS_ONESHOT; in __setup_irq()
1778 irq_settings_set_no_balancing(desc); in __setup_irq()
1779 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1783 irq_settings_can_autoenable(desc)) { in __setup_irq()
1784 irq_startup(desc, IRQ_RESEND, IRQ_START_COND); in __setup_irq()
1794 desc->depth = 1; in __setup_irq()
1799 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1809 irq_pm_install_action(desc, new); in __setup_irq()
1812 desc->irq_count = 0; in __setup_irq()
1813 desc->irqs_unhandled = 0; in __setup_irq()
1819 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { in __setup_irq()
1820 desc->istate &= ~IRQS_SPURIOUS_DISABLED; in __setup_irq()
1821 __enable_irq(desc); in __setup_irq()
1824 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1825 chip_bus_sync_unlock(desc); in __setup_irq()
1826 mutex_unlock(&desc->request_mutex); in __setup_irq()
1828 irq_setup_timings(desc, new); in __setup_irq()
1830 wake_up_and_wait_for_irq_thread_ready(desc, new); in __setup_irq()
1831 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); in __setup_irq()
1833 register_irq_proc(irq, desc); in __setup_irq()
1849 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1851 if (!desc->action) in __setup_irq()
1852 irq_release_resources(desc); in __setup_irq()
1854 chip_bus_sync_unlock(desc); in __setup_irq()
1855 mutex_unlock(&desc->request_mutex); in __setup_irq()
1871 module_put(desc->owner); in __setup_irq()
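For shared lines, __setup_irq() walks the existing desc->action chain and bails out to its mismatch path unless every sharer passes IRQF_SHARED, agrees on the trigger type, and (modulo the IRQCHIP_ONESHOT_SAFE escape at line 1712) agrees on IRQF_ONESHOT. Hence the usual pattern, sketched here with a level trigger:

        /* each sharer: IRQF_SHARED plus a unique, non-NULL dev_id so its
         * later free_irq() can find the right action in the chain */
        err = request_irq(irq, a_handler, IRQF_SHARED | IRQF_TRIGGER_HIGH,
                          "drv-a", dev_a);
        err = request_irq(irq, b_handler, IRQF_SHARED | IRQF_TRIGGER_HIGH,
                          "drv-b", dev_b);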
1879 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) in __free_irq() argument
1881 unsigned irq = desc->irq_data.irq; in __free_irq()
1887 mutex_lock(&desc->request_mutex); in __free_irq()
1888 chip_bus_lock(desc); in __free_irq()
1889 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1895 action_ptr = &desc->action; in __free_irq()
1901 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1902 chip_bus_sync_unlock(desc); in __free_irq()
1903 mutex_unlock(&desc->request_mutex); in __free_irq()
1915 irq_pm_remove_action(desc, action); in __free_irq()
1918 if (!desc->action) { in __free_irq()
1919 irq_settings_clr_disable_unlazy(desc); in __free_irq()
1921 irq_shutdown(desc); in __free_irq()
1926 if (WARN_ON_ONCE(desc->affinity_hint)) in __free_irq()
1927 desc->affinity_hint = NULL; in __free_irq()
1930 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1945 chip_bus_sync_unlock(desc); in __free_irq()
1954 __synchronize_irq(desc); in __free_irq()
1985 if (!desc->action) { in __free_irq()
1990 chip_bus_lock(desc); in __free_irq()
1995 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1996 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1997 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1999 irq_release_resources(desc); in __free_irq()
2000 chip_bus_sync_unlock(desc); in __free_irq()
2001 irq_remove_timings(desc); in __free_irq()
2004 mutex_unlock(&desc->request_mutex); in __free_irq()
2006 irq_chip_pm_put(&desc->irq_data); in __free_irq()
2007 module_put(desc->owner); in __free_irq()
2030 struct irq_desc *desc = irq_to_desc(irq); in free_irq() local
2034 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_irq()
2038 if (WARN_ON(desc->affinity_notify)) in free_irq()
2039 desc->affinity_notify = NULL; in free_irq()
2042 action = __free_irq(desc, dev_id); in free_irq()
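Note the ordering contract the WARNs in free_irq() and __free_irq() enforce: the affinity notifier and the affinity hint must be gone before the action is torn down. free_irq() then returns the action's devname as a const void *. Sketch:

        irq_set_affinity_notifier(irq, NULL);   /* if one was registered */
        irq_update_affinity_hint(irq, NULL);    /* if a hint was published */
        devname = free_irq(irq, md);            /* returns the requesting name */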
2054 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) in __cleanup_nmi() argument
2058 desc->istate &= ~IRQS_NMI; in __cleanup_nmi()
2060 if (!WARN_ON(desc->action == NULL)) { in __cleanup_nmi()
2061 irq_pm_remove_action(desc, desc->action); in __cleanup_nmi()
2062 devname = desc->action->name; in __cleanup_nmi()
2063 unregister_handler_proc(irq, desc->action); in __cleanup_nmi()
2065 kfree(desc->action); in __cleanup_nmi()
2066 desc->action = NULL; in __cleanup_nmi()
2069 irq_settings_clr_disable_unlazy(desc); in __cleanup_nmi()
2070 irq_shutdown_and_deactivate(desc); in __cleanup_nmi()
2072 irq_release_resources(desc); in __cleanup_nmi()
2074 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
2075 module_put(desc->owner); in __cleanup_nmi()
2082 struct irq_desc *desc = irq_to_desc(irq); in free_nmi() local
2086 if (!desc || WARN_ON(!irq_is_nmi(desc))) in free_nmi()
2089 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_nmi()
2093 if (WARN_ON(desc->depth == 0)) in free_nmi()
2096 raw_spin_lock_irqsave(&desc->lock, flags); in free_nmi()
2098 irq_nmi_teardown(desc); in free_nmi()
2099 devname = __cleanup_nmi(irq, desc); in free_nmi()
2101 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_nmi()
2153 struct irq_desc *desc; in request_threaded_irq() local
2178 desc = irq_to_desc(irq); in request_threaded_irq()
2179 if (!desc) in request_threaded_irq()
2182 if (!irq_settings_can_request(desc) || in request_threaded_irq()
2183 WARN_ON(irq_settings_is_per_cpu_devid(desc))) in request_threaded_irq()
2202 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2208 retval = __setup_irq(irq, desc, action); in request_threaded_irq()
2211 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
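request_threaded_irq() just packages an irqaction and hands it to __setup_irq(); request_irq() is the same call with a NULL thread_fn. A typical invocation, reusing the handler pair sketched under irq_thread() above:

        err = request_threaded_irq(irq, mydev_hardirq, mydev_thread,
                                   IRQF_ONESHOT, "mydev", md);
        /* handler == NULL is also accepted: the core substitutes
         * irq_default_primary_handler() and IRQF_ONESHOT becomes mandatory */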
2259 struct irq_desc *desc; in request_any_context_irq() local
2265 desc = irq_to_desc(irq); in request_any_context_irq()
2266 if (!desc) in request_any_context_irq()
2269 if (irq_settings_is_nested_thread(desc)) { in request_any_context_irq()
2310 struct irq_desc *desc; in request_nmi() local
2327 desc = irq_to_desc(irq); in request_nmi()
2329 if (!desc || (irq_settings_can_autoenable(desc) && in request_nmi()
2331 !irq_settings_can_request(desc) || in request_nmi()
2332 WARN_ON(irq_settings_is_per_cpu_devid(desc)) || in request_nmi()
2333 !irq_supports_nmi(desc)) in request_nmi()
2345 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2349 retval = __setup_irq(irq, desc, action); in request_nmi()
2353 raw_spin_lock_irqsave(&desc->lock, flags); in request_nmi()
2356 desc->istate |= IRQS_NMI; in request_nmi()
2357 retval = irq_nmi_setup(desc); in request_nmi()
2359 __cleanup_nmi(irq, desc); in request_nmi()
2360 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2364 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2369 irq_chip_pm_put(&desc->irq_data); in request_nmi()
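request_nmi() reuses __setup_irq() and then flips the descriptor into NMI mode (IRQS_NMI plus irq_nmi_setup()). The irq_settings_can_autoenable() check above means the interrupt must not auto-enable, so callers pass IRQF_NO_AUTOEN and arm it themselves. Sketch; my_nmi_handler is illustrative:

        err = request_nmi(irq, my_nmi_handler, IRQF_NO_AUTOEN, "mydev-nmi", md);
        if (!err)
                enable_irq(irq);        /* arm once setup is complete */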
2380 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in enable_percpu_irq() local
2382 if (!desc) in enable_percpu_irq()
2391 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2396 ret = __irq_set_trigger(desc, type); in enable_percpu_irq()
2404 irq_percpu_enable(desc, cpu); in enable_percpu_irq()
2406 irq_put_desc_unlock(desc, flags); in enable_percpu_irq()
2425 struct irq_desc *desc; in irq_percpu_is_enabled() local
2429 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in irq_percpu_is_enabled()
2430 if (!desc) in irq_percpu_is_enabled()
2433 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in irq_percpu_is_enabled()
2434 irq_put_desc_unlock(desc, flags); in irq_percpu_is_enabled()
2444 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in disable_percpu_irq() local
2446 if (!desc) in disable_percpu_irq()
2449 irq_percpu_disable(desc, cpu); in disable_percpu_irq()
2450 irq_put_desc_unlock(desc, flags); in disable_percpu_irq()
2464 struct irq_desc *desc = irq_to_desc(irq); in __free_percpu_irq() local
2470 if (!desc) in __free_percpu_irq()
2473 raw_spin_lock_irqsave(&desc->lock, flags); in __free_percpu_irq()
2475 action = desc->action; in __free_percpu_irq()
2481 if (!cpumask_empty(desc->percpu_enabled)) { in __free_percpu_irq()
2483 irq, cpumask_first(desc->percpu_enabled)); in __free_percpu_irq()
2488 desc->action = NULL; in __free_percpu_irq()
2490 desc->istate &= ~IRQS_NMI; in __free_percpu_irq()
2492 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2496 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2497 module_put(desc->owner); in __free_percpu_irq()
2501 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2514 struct irq_desc *desc = irq_to_desc(irq); in remove_percpu_irq() local
2516 if (desc && irq_settings_is_per_cpu_devid(desc)) in remove_percpu_irq()
2534 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_irq() local
2536 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_irq()
2539 chip_bus_lock(desc); in free_percpu_irq()
2541 chip_bus_sync_unlock(desc); in free_percpu_irq()
2547 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_nmi() local
2549 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_nmi()
2552 if (WARN_ON(!irq_is_nmi(desc))) in free_percpu_nmi()
2567 struct irq_desc *desc = irq_to_desc(irq); in setup_percpu_irq() local
2570 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in setup_percpu_irq()
2573 retval = irq_chip_pm_get(&desc->irq_data); in setup_percpu_irq()
2577 retval = __setup_irq(irq, desc, act); in setup_percpu_irq()
2580 irq_chip_pm_put(&desc->irq_data); in setup_percpu_irq()
2607 struct irq_desc *desc; in __request_percpu_irq() local
2613 desc = irq_to_desc(irq); in __request_percpu_irq()
2614 if (!desc || !irq_settings_can_request(desc) || in __request_percpu_irq()
2615 !irq_settings_is_per_cpu_devid(desc)) in __request_percpu_irq()
2630 retval = irq_chip_pm_get(&desc->irq_data); in __request_percpu_irq()
2636 retval = __setup_irq(irq, desc, action); in __request_percpu_irq()
2639 irq_chip_pm_put(&desc->irq_data); in __request_percpu_irq()
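Per-CPU interrupts take a __percpu dev_id, and request time never enables them anywhere; each CPU arms its own copy locally afterwards. A lifecycle sketch with illustrative mydev_pcpu/handler names:

        static DEFINE_PER_CPU(struct mydev_pcpu, mydev_pcpu);

        err = request_percpu_irq(irq, mydev_percpu_irq, "mydev", &mydev_pcpu);

        /* on each target CPU, e.g. from a cpuhp online callback: */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);

        /* mirror image on teardown: */
        disable_percpu_irq(irq);                /* per CPU */
        free_percpu_irq(irq, &mydev_pcpu);      /* once, task context */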
2672 struct irq_desc *desc; in request_percpu_nmi() local
2679 desc = irq_to_desc(irq); in request_percpu_nmi()
2681 if (!desc || !irq_settings_can_request(desc) || in request_percpu_nmi()
2682 !irq_settings_is_per_cpu_devid(desc) || in request_percpu_nmi()
2683 irq_settings_can_autoenable(desc) || in request_percpu_nmi()
2684 !irq_supports_nmi(desc)) in request_percpu_nmi()
2688 if (irq_is_nmi(desc)) in request_percpu_nmi()
2701 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2705 retval = __setup_irq(irq, desc, action); in request_percpu_nmi()
2709 raw_spin_lock_irqsave(&desc->lock, flags); in request_percpu_nmi()
2710 desc->istate |= IRQS_NMI; in request_percpu_nmi()
2711 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_percpu_nmi()
2716 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2739 struct irq_desc *desc; in prepare_percpu_nmi() local
2744 desc = irq_get_desc_lock(irq, &flags, in prepare_percpu_nmi()
2746 if (!desc) in prepare_percpu_nmi()
2749 if (WARN(!irq_is_nmi(desc), in prepare_percpu_nmi()
2756 ret = irq_nmi_setup(desc); in prepare_percpu_nmi()
2763 irq_put_desc_unlock(desc, flags); in prepare_percpu_nmi()
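The per-CPU NMI flow layers on top: request_percpu_nmi() once from task context, then prepare_percpu_nmi() followed by enable_percpu_nmi(), both executed on each target CPU itself. A sketch under those assumptions:

        /* once, task context: */
        err = request_percpu_nmi(irq, my_nmi_handler, "mydev-nmi", &mydev_pcpu);

        /* then, running on each target CPU: */
        err = prepare_percpu_nmi(irq);
        if (!err)
                enable_percpu_nmi(irq, IRQ_TYPE_NONE);

        /* teardown mirrors it: disable_percpu_nmi() and teardown_percpu_nmi()
         * per CPU, then free_percpu_nmi() once from task context */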
2782 struct irq_desc *desc; in teardown_percpu_nmi() local
2786 desc = irq_get_desc_lock(irq, &flags, in teardown_percpu_nmi()
2788 if (!desc) in teardown_percpu_nmi()
2791 if (WARN_ON(!irq_is_nmi(desc))) in teardown_percpu_nmi()
2794 irq_nmi_teardown(desc); in teardown_percpu_nmi()
2796 irq_put_desc_unlock(desc, flags); in teardown_percpu_nmi()
2839 struct irq_desc *desc; in irq_get_irqchip_state() local
2844 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_get_irqchip_state()
2845 if (!desc) in irq_get_irqchip_state()
2848 data = irq_desc_get_irq_data(desc); in irq_get_irqchip_state()
2852 irq_put_desc_busunlock(desc, flags); in irq_get_irqchip_state()
2872 struct irq_desc *desc; in irq_set_irqchip_state() local
2878 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_irqchip_state()
2879 if (!desc) in irq_set_irqchip_state()
2882 data = irq_desc_get_irq_data(desc); in irq_set_irqchip_state()
2903 irq_put_desc_busunlock(desc, flags); in irq_set_irqchip_state()
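irq_get_irqchip_state()/irq_set_irqchip_state() forward to a chip callback located by walking the irq_data hierarchy, under the descriptor's bus lock. A sketch that queries and clears a latched pending bit:

        bool pending;

        err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
        if (!err && pending)
                err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);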
2934 struct irq_desc *desc; in irq_check_status_bit() local
2938 desc = irq_to_desc(irq); in irq_check_status_bit()
2939 if (desc) in irq_check_status_bit()
2940 res = !!(desc->status_use_accessors & bitmask); in irq_check_status_bit()