Lines matching the identifier desc in the Linux kernel's IRQ descriptor code (kernel/irq/irqdesc.c). Only the lines that contain desc are listed, grouped by the function they appear in; surrounding code is elided, so some lines are truncated mid-statement.

In alloc_masks() [CONFIG_SMP]:
    static int alloc_masks(struct irq_desc *desc, int node)
    if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
    if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
        free_cpumask_var(desc->irq_common_data.affinity);
    if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
        free_cpumask_var(desc->irq_common_data.effective_affinity);
        free_cpumask_var(desc->irq_common_data.affinity);

In desc_smp_init() [CONFIG_SMP]:
    static void desc_smp_init(struct irq_desc *desc, int node,
    cpumask_copy(desc->irq_common_data.affinity, affinity);
    cpumask_clear(desc->pending_mask);
    desc->irq_common_data.node = node;

In free_masks() [CONFIG_SMP]:
    static void free_masks(struct irq_desc *desc)
    free_cpumask_var(desc->pending_mask);
    free_cpumask_var(desc->irq_common_data.affinity);
    free_cpumask_var(desc->irq_common_data.effective_affinity);

!CONFIG_SMP stubs:
    alloc_masks(struct irq_desc *desc, int node) { return 0; }
    desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
    static inline void free_masks(struct irq_desc *desc) { }
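alloc_masks() frees every earlier cpumask when a later allocation fails, so the caller sees either a fully initialized descriptor or no allocations at all. A minimal, self-contained userspace analog of that unwind-on-failure idiom; the struct, field names, and sizes below are invented for illustration and are not the kernel API:

    #include <stdlib.h>

    struct masks { void *affinity, *effective, *pending; };

    /* Each failing allocation frees everything allocated before it,
     * mirroring the structure of alloc_masks(). */
    static int alloc_all(struct masks *m)
    {
        m->affinity = calloc(1, 128);
        if (!m->affinity)
            return -1;

        m->effective = calloc(1, 128);
        if (!m->effective) {
            free(m->affinity);
            return -1;
        }

        m->pending = calloc(1, 128);
        if (!m->pending) {
            free(m->effective);
            free(m->affinity);
            return -1;
        }
        return 0;
    }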
In desc_set_defaults():
    static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
    desc->irq_common_data.handler_data = NULL;
    desc->irq_common_data.msi_desc = NULL;
    desc->irq_data.common = &desc->irq_common_data;
    desc->irq_data.irq = irq;
    desc->irq_data.chip = &no_irq_chip;
    desc->irq_data.chip_data = NULL;
    irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
    irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
    irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
    desc->handle_irq = handle_bad_irq;
    desc->depth = 1;
    desc->irq_count = 0;
    desc->irqs_unhandled = 0;
    desc->tot_count = 0;
    desc->name = NULL;
    desc->owner = owner;
    *per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { };
    desc_smp_init(desc, node, affinity);
In irq_find_at_or_after():
    struct irq_desc *desc;
    desc = mt_find(&sparse_irqs, &index, nr_irqs);
    return desc ? irq_desc_get_irq(desc) : nr_irqs;

In irq_insert_desc():
    static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
    WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
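These two helpers show the maple tree (sparse_irqs) acting as the sparse map from IRQ number to descriptor: mas_store_gfp() stores at a single index, and mt_find() returns the first entry at or after an index. A hedged kernel-style sketch of that usage; demo_tree and the helper names are invented, not the actual sparse_irqs code:

    #include <linux/maple_tree.h>

    static DEFINE_MTREE(demo_tree);    /* illustrative tree, not sparse_irqs */

    /* Store an entry at a single index, as irq_insert_desc() does.
     * In irqdesc.c, callers serialize these stores externally. */
    static int demo_insert(unsigned long index, void *entry)
    {
        MA_STATE(mas, &demo_tree, index, index);

        return mas_store_gfp(&mas, entry, GFP_KERNEL);
    }

    /* Find the first entry at or after 'offset', as irq_find_at_or_after()
     * does; mt_find() advances 'index' to where the entry was found. */
    static void *demo_find_at_or_after(unsigned long offset, unsigned long max)
    {
        unsigned long index = offset;

        return mt_find(&demo_tree, &index, max);
    }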
In init_desc():
    static int init_desc(struct irq_desc *desc, int irq, int node,
    desc->kstat_irqs = alloc_percpu(struct irqstat);
    if (!desc->kstat_irqs)
    if (alloc_masks(desc, node)) {
        free_percpu(desc->kstat_irqs);
    raw_spin_lock_init(&desc->lock);
    lockdep_set_class(&desc->lock, &irq_desc_lock_class);
    mutex_init(&desc->request_mutex);
    init_waitqueue_head(&desc->wait_for_threads);
    desc_set_defaults(irq, desc, node, affinity, owner);
    irqd_set(&desc->irq_data, flags);
    irq_resend_init(desc);
    kobject_init(&desc->kobj, &irq_kobj_type);
    init_rcu_head(&desc->rcu);
In per_cpu_count_show():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    unsigned int c = irq_desc_kstat_cpu(desc, cpu);

In chip_name_show():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    raw_spin_lock_irq(&desc->lock);
    if (desc->irq_data.chip && desc->irq_data.chip->name) {
        desc->irq_data.chip->name);
    raw_spin_unlock_irq(&desc->lock);

In hwirq_show():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    raw_spin_lock_irq(&desc->lock);
    if (desc->irq_data.domain)
        ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
    raw_spin_unlock_irq(&desc->lock);

In type_show():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    raw_spin_lock_irq(&desc->lock);
        irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
    raw_spin_unlock_irq(&desc->lock);

In wakeup_show():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    raw_spin_lock_irq(&desc->lock);
        irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
    raw_spin_unlock_irq(&desc->lock);

In name_show():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    raw_spin_lock_irq(&desc->lock);
    if (desc->name)
        ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
    raw_spin_unlock_irq(&desc->lock);

In actions_show():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    raw_spin_lock_irq(&desc->lock);
    for_each_action_of_desc(desc, action) {
    raw_spin_unlock_irq(&desc->lock);
In irq_sysfs_add():
    static void irq_sysfs_add(int irq, struct irq_desc *desc)
    if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
    desc->istate |= IRQS_SYSFS;

In irq_sysfs_del():
    static void irq_sysfs_del(struct irq_desc *desc)
    if (desc->istate & IRQS_SYSFS)
        kobject_del(&desc->kobj);

In irq_sysfs_init():
    struct irq_desc *desc;
    for_each_irq_desc(irq, desc)
        irq_sysfs_add(irq, desc);

!CONFIG_SYSFS stubs:
    static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
    static void irq_sysfs_del(struct irq_desc *desc) {}
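All of the *_show() callbacks above follow one pattern: recover the irq_desc from its embedded kobject with container_of(), take desc->lock against concurrent writers, format into buf, then drop the lock. A hedged, generic sketch of that kobject attribute pattern; demo_obj and its fields are invented stand-ins for irq_desc:

    #include <linux/kobject.h>
    #include <linux/spinlock.h>
    #include <linux/sysfs.h>

    struct demo_obj {                  /* invented container */
        struct kobject kobj;
        raw_spinlock_t lock;           /* raw_spin_lock_init() at creation (not shown) */
        const char *name;
    };

    static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr,
                             char *buf)
    {
        struct demo_obj *d = container_of(kobj, struct demo_obj, kobj);
        ssize_t ret = 0;

        /* Serialize against writers, as the irq_desc attributes do
         * with raw_spin_lock_irq(&desc->lock). */
        raw_spin_lock_irq(&d->lock);
        if (d->name)
            ret = scnprintf(buf, PAGE_SIZE, "%s\n", d->name);
        raw_spin_unlock_irq(&d->lock);

        return ret;
    }

    static struct kobj_attribute name_attr = __ATTR_RO(name);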
In alloc_desc():
    struct irq_desc *desc;
    desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
    if (!desc)
    ret = init_desc(desc, irq, node, flags, affinity, owner);
        kfree(desc);
    return desc;

In irq_kobj_release():
    struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
    free_masks(desc);
    free_percpu(desc->kstat_irqs);
    kfree(desc);

In delayed_free_desc():
    struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
    kobject_put(&desc->kobj);
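irq_kobj_release() and delayed_free_desc() give the descriptor a two-stage teardown: freeing is deferred through call_rcu() so lockless readers that still hold a pointer survive the grace period, and only then does kobject_put() drop the final reference, which runs the release callback that frees the memory. A hedged sketch of the same pattern with an invented demo_desc type:

    #include <linux/kobject.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_desc {                 /* invented; mirrors irq_desc's kobj + rcu */
        struct kobject kobj;
        struct rcu_head rcu;
    };

    /* Final destructor: runs when the last kobject reference drops. */
    static void demo_kobj_release(struct kobject *kobj)
    {
        struct demo_desc *d = container_of(kobj, struct demo_desc, kobj);

        kfree(d);
    }

    static const struct kobj_type demo_ktype = {
        .release = demo_kobj_release,  /* set via kobject_init(&d->kobj, &demo_ktype) */
    };

    /* RCU stage: drop the kobject reference only after a grace period. */
    static void demo_delayed_free(struct rcu_head *rhp)
    {
        struct demo_desc *d = container_of(rhp, struct demo_desc, rcu);

        kobject_put(&d->kobj);
    }

    static void demo_free(struct demo_desc *d)
    {
        /* ...unpublish d from all lookup structures first... */
        call_rcu(&d->rcu, demo_delayed_free);
    }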
In free_desc():
    struct irq_desc *desc = irq_to_desc(irq);
    irq_remove_debugfs_entry(desc);
    unregister_irq_proc(irq, desc);
    irq_sysfs_del(desc);
    call_rcu(&desc->rcu, delayed_free_desc);

In alloc_descs():
    struct irq_desc *desc;
    desc = alloc_desc(start + i, node, flags, mask, owner);
    if (!desc)
    irq_insert_desc(start + i, desc);
    irq_sysfs_add(start + i, desc);
    irq_add_debugfs_entry(start + i, desc);
In early_irq_init() [CONFIG_SPARSE_IRQ]:
    struct irq_desc *desc;
    desc = alloc_desc(i, node, 0, NULL, NULL);
    irq_insert_desc(i, desc);

In free_desc() [!CONFIG_SPARSE_IRQ]:
    struct irq_desc *desc = irq_to_desc(irq);
    raw_spin_lock_irqsave(&desc->lock, flags);
    desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
    raw_spin_unlock_irqrestore(&desc->lock, flags);

In alloc_descs() [!CONFIG_SPARSE_IRQ]:
    struct irq_desc *desc = irq_to_desc(start + i);
    desc->owner = owner;
    irq_insert_desc(start + i, desc);
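Both alloc_descs() variants back the public descriptor allocators; drivers normally reach them through the irq_alloc_descs()/irq_alloc_desc() macro family in linux/irq.h and release with irq_free_descs(). A hedged usage sketch (demo_get_irq_range() is invented):

    #include <linux/irq.h>
    #include <linux/numa.h>

    static int demo_get_irq_range(unsigned int cnt)
    {
        int irq;

        /* First argument -1: no fixed IRQ number requested, search from 0. */
        irq = irq_alloc_descs(-1, 0, cnt, NUMA_NO_NODE);
        if (irq < 0)
            return irq;

        /* ...set up handlers for irq .. irq + cnt - 1... */

        irq_free_descs(irq, cnt);      /* teardown */
        return 0;
    }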
In handle_irq_desc():
    int handle_irq_desc(struct irq_desc *desc)
    if (!desc)
    data = irq_desc_get_irq_data(desc);
    generic_handle_irq_desc(desc);
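handle_irq_desc() validates the descriptor before dispatching through generic_handle_irq_desc(), which jumps via desc->handle_irq. Drivers usually reach this path through generic_handle_irq(). A hedged sketch of a chained demultiplexing handler, as registered with irq_set_chained_handler_and_data(); the device register read and IRQ base are invented placeholders:

    #include <linux/bitops.h>
    #include <linux/irq.h>
    #include <linux/irqchip/chained_irq.h>

    static unsigned int demo_irq_base;                 /* placeholder */
    static unsigned long demo_read_pending(void)       /* placeholder device read */
    {
        return 0;
    }

    /* Forward each pending child line as a Linux interrupt. */
    static void demo_demux_handler(struct irq_desc *desc)
    {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long pending;
        int bit;

        chained_irq_enter(chip, desc);
        pending = demo_read_pending();
        for_each_set_bit(bit, &pending, 32)
            generic_handle_irq(demo_irq_base + bit);
        chained_irq_exit(chip, desc);
    }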
In __irq_get_desc_lock():
    struct irq_desc *desc = irq_to_desc(irq);
    if (desc) {
        !irq_settings_is_per_cpu_devid(desc))
        irq_settings_is_per_cpu_devid(desc))
        chip_bus_lock(desc);
    raw_spin_lock_irqsave(&desc->lock, *flags);
    return desc;

In __irq_put_desc_unlock():
    void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
        __releases(&desc->lock)
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    chip_bus_sync_unlock(desc);
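This pair brackets most irq_set_*()/irq_get_*() helpers: look up the descriptor, optionally take the slow bus lock, take desc->lock with interrupts disabled, and undo both on the way out. A hedged sketch of a caller using the irq_get_desc_buslock()/irq_put_desc_busunlock() wrappers from kernel/irq/internals.h; demo_poke_irq() is invented and would have to live in kernel/irq/ to see that header:

    #include "internals.h"

    static int demo_poke_irq(unsigned int irq)
    {
        unsigned long flags;
        struct irq_desc *desc;

        desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        if (!desc)
            return -EINVAL;

        /* ...operate on desc while holding desc->lock... */

        irq_put_desc_busunlock(desc, flags);
        return 0;
    }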
In irq_set_percpu_devid_partition():
    struct irq_desc *desc = irq_to_desc(irq);
    if (!desc || desc->percpu_enabled)
    desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
    if (!desc->percpu_enabled)
    desc->percpu_affinity = affinity ? : cpu_possible_mask;

In irq_get_percpu_devid_partition():
    struct irq_desc *desc = irq_to_desc(irq);
    if (!desc || !desc->percpu_enabled)
    cpumask_copy(affinity, desc->percpu_affinity);
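irq_set_percpu_devid_partition() marks an IRQ as per-CPU-devid: one that is requested once with request_percpu_irq() and whose handler receives this CPU's instance of a per-CPU device id. A hedged sketch of the driver side; the handler, state type, and names are invented:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>

    struct demo_pcpu { int counter; };         /* invented per-CPU state */
    static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu_state);

    static irqreturn_t demo_percpu_handler(int irq, void *dev_id)
    {
        struct demo_pcpu *p = dev_id;          /* this CPU's instance */

        p->counter++;
        return IRQ_HANDLED;
    }

    static int demo_setup(unsigned int irq)
    {
        int ret;

        /* Equivalent to irq_set_percpu_devid_partition(irq, NULL);
         * percpu_affinity then defaults to cpu_possible_mask. */
        ret = irq_set_percpu_devid(irq);
        if (ret)
            return ret;

        return request_percpu_irq(irq, demo_percpu_handler, "demo",
                                  &demo_pcpu_state);
    }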
In kstat_irqs_cpu():
    struct irq_desc *desc = irq_to_desc(irq);
    return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;

In kstat_irqs_desc():
    unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
    if (!irq_settings_is_per_cpu_devid(desc) &&
        !irq_settings_is_per_cpu(desc) &&
        !irq_is_nmi(desc))
        return data_race(desc->tot_count);
    sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));

In kstat_irqs():
    struct irq_desc *desc = irq_to_desc(irq);
    if (!desc || !desc->kstat_irqs)
    return kstat_irqs_desc(desc, cpu_possible_mask);

In kstat_snapshot_irqs():
    struct irq_desc *desc;
    for_each_irq_desc(irq, desc) {
        if (!desc->kstat_irqs)
        this_cpu_write(desc->kstat_irqs->ref, this_cpu_read(desc->kstat_irqs->cnt));

In kstat_get_irq_since_snapshot():
    struct irq_desc *desc = irq_to_desc(irq);
    if (!desc || !desc->kstat_irqs)
    return this_cpu_read(desc->kstat_irqs->cnt) - this_cpu_read(desc->kstat_irqs->ref);
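kstat_snapshot_irqs() and kstat_get_irq_since_snapshot() are per-CPU by construction: the snapshot is written with this_cpu_write() and the delta read with this_cpu_read(), so both calls must run on the same CPU. A minimal hedged sketch of that pairing, assuming a kernel built with the snapshot support these functions are gated behind; demo_irq_delta() is invented:

    #include <linux/kernel_stat.h>

    /* Count how often 'irq' fired on this CPU across some interval.
     * Caller must stay on one CPU between the two calls. */
    static unsigned int demo_irq_delta(unsigned int irq)
    {
        kstat_snapshot_irqs();         /* ref = cnt for every IRQ, this CPU */

        /* ...the interval being measured... */

        return kstat_get_irq_since_snapshot(irq);
    }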
In __irq_set_lockdep_class():
    struct irq_desc *desc = irq_to_desc(irq);
    if (desc) {
        lockdep_set_class(&desc->lock, lock_class);
        lockdep_set_class(&desc->request_mutex, request_class);
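__irq_set_lockdep_class() gives an IRQ's desc->lock and request_mutex their own lockdep classes, so legitimately nested usage (for example in stacked irqchip drivers) does not trip false-positive deadlock reports. It is normally reached through the irq_set_lockdep_class() wrapper in linux/irqdesc.h, which compiles away without lockdep. A hedged usage sketch; the key and function names are invented:

    #include <linux/irqdesc.h>
    #include <linux/lockdep.h>

    /* One static key per class; lockdep keys must live in static storage. */
    static struct lock_class_key demo_irq_lock_class;
    static struct lock_class_key demo_irq_request_class;

    static void demo_classify(unsigned int irq)
    {
        /* No-op unless CONFIG_LOCKDEP is enabled. */
        irq_set_lockdep_class(irq, &demo_irq_lock_class,
                              &demo_irq_request_class);
    }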