Lines matching "affinity" in kernel/irq/irqdesc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 */

/* lockdep: we want to handle all irq_desc locks as a single lock-class: */
static struct lock_class_key irq_desc_lock_class;

static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;
	/* effective_affinity: CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK only */
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
	/* pending_mask: CONFIG_GENERIC_PENDING_IRQ only */
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.effective_affinity);
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
	return 0;
}
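alloc_masks() frees the already-allocated masks in reverse order on each failure path. A minimal sketch of the same cpumask_var_t lifecycle, usable from any kernel module; example_alloc_mask() is a hypothetical helper, not part of the kernel:

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	/* Allocate a zeroed, node-local cpumask for the caller. The caller
	 * owns the mask and must release it with free_cpumask_var(). */
	static int example_alloc_mask(cpumask_var_t *maskp, int node)
	{
		if (!zalloc_cpumask_var_node(maskp, GFP_KERNEL, node))
			return -ENOMEM;	/* nothing allocated yet, no unwind */
		cpumask_set_cpu(0, *maskp);	/* use the mask... */
		return 0;
	}

With CONFIG_CPUMASK_OFFSTACK the mask is a real heap allocation, which is why every successful zalloc_cpumask_var_node() above needs a matching free_cpumask_var() on the error paths.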
static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);
	cpumask_clear(desc->pending_mask);	/* CONFIG_GENERIC_PENDING_IRQ */
	desc->irq_common_data.node = node;	/* CONFIG_NUMA */
}

static void free_masks(struct irq_desc *desc)
{
	free_cpumask_var(desc->pending_mask);
	free_cpumask_var(desc->irq_common_data.affinity);
	free_cpumask_var(desc->irq_common_data.effective_affinity);
}

/* !CONFIG_SMP stub: */
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;
	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { };
	desc_smp_init(desc, node, affinity);
}

/* irq_find_free_area() fails when no free descriptor slot exists: */
	return -ENOSPC;
static int init_desc(struct irq_desc *desc, int irq, int node,
		     unsigned int flags,
		     const struct cpumask *affinity,
		     struct module *owner)
{
	desc->kstat_irqs = alloc_percpu(struct irqstat);
	if (!desc->kstat_irqs)
		return -ENOMEM;

	if (alloc_masks(desc, node)) {
		free_percpu(desc->kstat_irqs);
		return -ENOMEM;
	}

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_waitqueue_head(&desc->wait_for_threads);
	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	/* CONFIG_SPARSE_IRQ only: */
	kobject_init(&desc->kobj, &irq_kobj_type);
	init_rcu_head(&desc->rcu);
	return 0;
}
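init_desc() allocates the per-CPU statistics with alloc_percpu() and unwinds with free_percpu() if the mask allocation fails. A self-contained sketch of that per-CPU allocation pattern; struct example_stat and the example_* functions are hypothetical stand-ins for struct irqstat and the descriptor code:

	#include <linux/percpu.h>

	struct example_stat {
		unsigned int cnt;	/* one counter instance per CPU */
	};

	static struct example_stat __percpu *example_stats;

	static int example_init(void)
	{
		example_stats = alloc_percpu(struct example_stat);
		if (!example_stats)
			return -ENOMEM;
		this_cpu_inc(example_stats->cnt);	/* count an event on this CPU */
		return 0;
	}

	static void example_exit(void)
	{
		free_percpu(example_stats);	/* releases all per-CPU instances */
	}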
/* In per_cpu_count_show(): */
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

/* In chip_name_show(): */
	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

/* In hwirq_show(): */
	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

/* In type_show(): */
	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

/* In wakeup_show(): */
	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

/* In name_show(): */
	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

/* In actions_show(): */
	raw_spin_lock_irq(&desc->lock);
	for_each_action_of_desc(desc, action) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);
	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

/* In irq_sysfs_add(): */
	if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
		pr_warn("Failed to add kobject for irq %d\n", irq);
	else
		desc->istate |= IRQS_SYSFS;

/* In irq_sysfs_del(): */
	if (desc->istate & IRQS_SYSFS)
		kobject_del(&desc->kobj);

/* irq_sysfs_init() fails when the irq kobject cannot be created: */
	return -ENOMEM;
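All of the *_show helpers above follow the same shape: take desc->lock with interrupts disabled, format into the PAGE_SIZE sysfs buffer, drop the lock. A hedged, self-contained sketch of that pattern for a plain kobject attribute; example_lock, example_name and name_attr are hypothetical:

	#include <linux/kobject.h>
	#include <linux/sysfs.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(example_lock);
	static const char *example_name = "demo";

	static ssize_t name_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
	{
		ssize_t ret;

		/* Snapshot the state under the lock, as the irq code does */
		raw_spin_lock_irq(&example_lock);
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", example_name);
		raw_spin_unlock_irq(&example_lock);
		return ret;
	}
	static struct kobj_attribute name_attr = __ATTR_RO(name);

The attribute would then be registered with sysfs_create_file(kobj, &name_attr.attr). scnprintf() rather than sprintf() keeps the output bounded to the one-page sysfs buffer.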
/* In alloc_desc(): the affinity mask is passed straight through */
	ret = init_desc(desc, irq, node, flags, affinity, owner);

/* In irq_kobj_release(): */
	free_percpu(desc->kstat_irqs);

/* In delayed_free_desc(): */
	kobject_put(&desc->kobj);

/* free_desc() defers the release until after an RCU grace period: */
	call_rcu(&desc->rcu, delayed_free_desc);
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}
	...
		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			flags |= IRQD_AFFINITY_SET;
			mask = &affinity->mask;
			affinity++;
		}
	...
err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}
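The is_managed branch is what drivers reach when they ask the core to spread queue vectors across CPUs; the resulting descriptors are affinity-managed and shut down with their CPUs. A hedged sketch of the caller side, assuming a PCI driver with nr_queues MSI-X queues (example_setup_vectors() is hypothetical):

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static int example_setup_vectors(struct pci_dev *pdev,
					 unsigned int nr_queues)
	{
		struct irq_affinity affd = { };	/* no pre/post vectors */

		/* Managed descriptors end up in alloc_descs() above with
		 * affinity->is_managed set for each spread vector. */
		return pci_alloc_irq_vectors_affinity(pdev, 1, nr_queues,
						      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
						      &affd);
	}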
/* In irq_expand_nr_irqs() (CONFIG_SPARSE_IRQ), when nr exceeds MAX_SPARSE_IRQS: */
	return -ENOMEM;

/* !CONFIG_SPARSE_IRQ: statically sized descriptor array */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	},
};

/* In early_irq_init(), unwinding after an init_desc() failure: */
	while (--i >= 0) {
		...
	}

/* In free_desc() (!CONFIG_SPARSE_IRQ), reset the descriptor under its lock: */
	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

/* In alloc_descs() (!CONFIG_SPARSE_IRQ): */
		const struct irq_affinity_desc *affinity,
	...
	desc->owner = owner;

/* irq_expand_nr_irqs() (!CONFIG_SPARSE_IRQ) can never expand the static array: */
	return -ENOMEM;
/* In handle_irq_desc(): */
	if (!desc)
		return -EINVAL;
	data = irq_desc_get_irq_data(desc);
	if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
		return -EPERM;

/* generic_handle_irq - Invoke the handler for a particular irq.
 * Returns: 0 on success, or -EINVAL if conversion has failed. */

/* generic_handle_irq_safe - Invoke the handler for a particular irq from any
 * context; reports an error if not called from IRQ context and the interrupt
 * is marked to enforce IRQ-context only. */

/* generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 * to a domain. Returns: 0 on success, or -EINVAL if conversion has failed. */

/* generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
 * to a domain, from any context (IRQ or process context). If the interrupt is
 * marked as 'enforce IRQ-context only' then the function must be invoked from
 * hard interrupt context. */

/* generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
 * to a domain. Returns: 0 on success, or -EINVAL if conversion has failed. */
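generic_handle_domain_irq() is the usual entry point for an irqchip driver's chained handler: translate each pending hardware irq number through the domain and invoke the mapped Linux handler. A hedged sketch; my_domain, my_chained_handler() and my_pending_reg() are hypothetical:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/irqchip/chained_irq.h>
	#include <linux/bitops.h>

	static struct irq_domain *my_domain;

	static unsigned long my_pending_reg(void)
	{
		return 0x5;	/* hypothetical: read the pending register */
	}

	static void my_chained_handler(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned long pending = my_pending_reg();
		unsigned int hwirq;

		chained_irq_enter(chip, desc);
		for_each_set_bit(hwirq, &pending, 32)
			generic_handle_domain_irq(my_domain, hwirq);
		chained_irq_exit(chip, desc);
	}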
/* irq_free_descs - free irq descriptors */

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @affinity: Optional pointer to an affinity mask array of size @cnt which
 *	      hints where the irq descriptors should be allocated and which
 *	      default affinities to use
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct irq_affinity_desc *affinity)
	...
		return -EINVAL;	/* a fixed irq was requested with from > irq */
	if (!cnt)
		return -EINVAL;
	...
	ret = -EEXIST;	/* a fixed irq was requested but its slot is taken */
	...
	ret = alloc_descs(start, cnt, node, affinity, owner);
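From a driver's point of view the usual entry point is the irq_alloc_descs() wrapper around __irq_alloc_descs(). A minimal usage sketch; the count, node choice and example_alloc_irq_range() are arbitrary assumptions:

	#include <linux/irq.h>
	#include <linux/topology.h>

	static int example_alloc_irq_range(void)
	{
		/* irq = -1: let the core pick any free range of 4 descriptors */
		int base = irq_alloc_descs(-1, 0, 4, numa_node_id());

		if (base < 0)
			return base;	/* -EINVAL, -EEXIST or -ENOMEM as above */
		/* ... map and wire up base..base+3 ... */
		irq_free_descs(base, 4);
		return 0;
	}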
/* irq_get_next_irq - get next allocated irq number */

/* In __irq_get_desc_lock(): */
	raw_spin_lock_irqsave(&desc->lock, *flags);

/* __irq_put_desc_unlock() releases it again: */
	__releases(&desc->lock)
	raw_spin_unlock_irqrestore(&desc->lock, flags);
int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
	if (!desc->percpu_enabled)
		return -ENOMEM;

	desc->percpu_affinity = affinity ? : cpu_possible_mask;
	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);
	return 0;
}
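A per-CPU devid interrupt (a PPI-style line that fires independently on every CPU) is first marked with irq_set_percpu_devid() and then requested with request_percpu_irq(). A hedged caller-side sketch; my_dev_id, my_percpu_handler() and the irq number source are hypothetical:

	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, my_dev_id);

	static irqreturn_t my_percpu_handler(int irq, void *dev_id)
	{
		/* dev_id is this CPU's instance of my_dev_id */
		return IRQ_HANDLED;
	}

	static int example_setup(int my_irq)
	{
		/* Allocates desc->percpu_enabled as shown above */
		int ret = irq_set_percpu_devid(my_irq);

		if (ret)
			return ret;
		return request_percpu_irq(my_irq, my_percpu_handler,
					  "my-percpu-irq", &my_dev_id);
	}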
/* kstat_irqs_cpu - Get the statistics for an interrupt on a cpu */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
}

/* In kstat_irqs_desc(): either the cached total or a per-CPU walk */
		return data_race(desc->tot_count);
	for_each_cpu(cpu, cpumask)
		sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));

/* In kstat_irqs(): */
	if (!desc || !desc->kstat_irqs)
		return 0;

/* In kstat_snapshot_irqs(): remember the current count in ->ref */
	if (!desc->kstat_irqs)
		return;
	this_cpu_write(desc->kstat_irqs->ref, this_cpu_read(desc->kstat_irqs->cnt));

/* In kstat_get_irq_since_snapshot(): */
	if (!desc || !desc->kstat_irqs)
		return 0;
	return this_cpu_read(desc->kstat_irqs->cnt) - this_cpu_read(desc->kstat_irqs->ref);
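The total for one interrupt is just the sum of its per-CPU counters; kstat_irqs_desc() shortcuts that with the cached tot_count where it can. A minimal sketch doing the summation by hand through the exported accessor; example_irq_total() is hypothetical:

	#include <linux/kernel_stat.h>
	#include <linux/cpumask.h>

	static unsigned int example_irq_total(unsigned int irq)
	{
		unsigned int cpu, sum = 0;

		/* Same walk as the slow path above, via kstat_irqs_cpu() */
		for_each_possible_cpu(cpu)
			sum += kstat_irqs_cpu(irq, cpu);
		return sum;
	}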
/* kstat_irqs_usr - Get the statistics for an interrupt from thread context */

/* In __irq_set_lockdep_class(): */
	lockdep_set_class(&desc->lock, lock_class);
	lockdep_set_class(&desc->request_mutex, request_class);
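Irqchip drivers use this through irq_set_lockdep_class() to give nested descriptors their own lock classes, so lockdep does not flag chained irq_desc locking as recursive (recall that all descriptors otherwise share irq_desc_lock_class). A short sketch; the key names and example_set_classes() are hypothetical:

	#include <linux/irqdesc.h>
	#include <linux/lockdep.h>

	static struct lock_class_key my_lock_class;
	static struct lock_class_key my_request_class;

	static void example_set_classes(unsigned int irq)
	{
		/* Reclassify both desc->lock and desc->request_mutex */
		irq_set_lockdep_class(irq, &my_lock_class, &my_request_class);
	}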