/linux-6.12.1/kernel/irq/

  migration.c
      29  if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {  in irq_fixup_move_pending()
      77  if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {  in irq_move_masked_irq()

  cpuhotplug.c
      40  !cpumask_intersects(m, cpu_online_mask)) {  in irq_needs_fixup()
     113  if (!cpumask_intersects(affinity, cpu_online_mask)) {  in migrate_one_irq()

  proc.c
     160  if (!cpumask_intersects(new_value, cpu_online_mask)) {  in write_irq_affinity()
     251  if (!cpumask_intersects(new_value, cpu_online_mask)) {  in default_affinity_write()

  manage.c
     261  if (!cpumask_intersects(tmp_mask, cpu_online_mask))  in irq_do_set_affinity()
     613  if (cpumask_intersects(desc->irq_common_data.affinity,  in irq_setup_affinity()
     628  if (cpumask_intersects(&mask, nodemask))  in irq_setup_affinity()

  chip.c
     201  if (!cpumask_intersects(aff, cpu_online_mask)) {  in __irq_startup_managed()

  msi.c
    1200  !cpumask_intersects(irq_data_get_affinity_mask(irqd),  in msi_init_virq()
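Nearly every hit in kernel/irq/ is the same validation idiom: before an affinity mask is accepted or acted on, it is checked against cpu_online_mask, and a mask with no online CPU is rejected. A minimal sketch of that idiom follows; the helper name and surrounding code are invented for illustration, and only cpumask_intersects(), cpu_online_mask and -EINVAL come from the hits above.

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /*
     * Hypothetical helper mirroring the pattern in write_irq_affinity(),
     * padata_validate_cpumask() and irq_cb_affinity_store(): refuse an
     * affinity mask that contains no currently online CPU, since nothing
     * could ever service it.
     */
    static int validate_requested_affinity(const struct cpumask *requested)
    {
            if (!cpumask_intersects(requested, cpu_online_mask))
                    return -EINVAL;         /* no online CPU in the mask */
            return 0;
    }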
/linux-6.12.1/kernel/cgroup/

  cpuset.c
     358  while (!cpumask_intersects(cs->effective_cpus, pmask))  in guarantee_online_cpus()
     508  if (cpumask_intersects(xcpus1, xcpus2))  in cpusets_are_exclusive()
     630  return cpumask_intersects(a->effective_cpus, b->effective_cpus);  in cpusets_overlap()
     783  cpumask_intersects(cp->cpus_allowed,  in generate_sched_domains()
    1182  (!cpumask_intersects(xcpus, cpu_active_mask) &&  in tasks_nocpu_error()
    1369  cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||  in remote_partition_enable()
    1455  cpumask_intersects(tmp->addmask, subpartitions_cpus) ||  in remote_cpus_update()
    1498  !cpumask_intersects(delmask, subpartitions_cpus))  in remote_partition_check()
    1511  if (cpumask_intersects(child->effective_cpus, delmask)) {  in remote_partition_check()
    1638  !cpumask_intersects(xcpus, parent->effective_xcpus))  in update_parent_effective_cpumask()
    [all …]
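cpuset.c uses the helper in both directions: two cpusets overlap when their effective_cpus masks intersect (cpusets_overlap()), while a pair of exclusive cpusets is only valid when their exclusive CPU masks do not (cpusets_are_exclusive()). A simplified sketch of that pair of checks; the struct and field names below are reduced stand-ins, not the real struct cpuset.

    #include <linux/types.h>
    #include <linux/cpumask.h>

    /* Reduced stand-in for struct cpuset, keeping only the masks used here. */
    struct demo_cpuset {
            cpumask_var_t effective_cpus;
            cpumask_var_t exclusive_cpus;
    };

    /* Mirrors cpusets_overlap(): any shared effective CPU means overlap. */
    static bool demo_cpusets_overlap(struct demo_cpuset *a, struct demo_cpuset *b)
    {
            return cpumask_intersects(a->effective_cpus, b->effective_cpus);
    }

    /* Mirrors the check in cpusets_are_exclusive(): a shared CPU breaks exclusivity. */
    static bool demo_cpusets_are_exclusive(struct demo_cpuset *a, struct demo_cpuset *b)
    {
            return !cpumask_intersects(a->exclusive_cpus, b->exclusive_cpus);
    }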
/linux-6.12.1/arch/mips/kernel/

  mips-mt-fpaff.c
     122  cpumask_intersects(new_mask, &mt_fpu_cpumask)) {  in mipsmt_sys_sched_setaffinity()

  traps.c
     944  if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {  in mt_ase_fp_affinity()

/linux-6.12.1/arch/powerpc/platforms/cell/

  spu_priv1_mmio.c
      71  if (!cpumask_intersects(spumask, cpumask))  in cpu_affinity_set()

/linux-6.12.1/arch/mips/mm/

  context.c
     249  cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {  in check_switch_mmu_context()

/linux-6.12.1/drivers/sh/intc/

  chip.c
      72  if (!cpumask_intersects(cpumask, cpu_online_mask))  in intc_set_affinity()

/linux-6.12.1/kernel/bpf/

  cpumask.c
     319  return cpumask_intersects(src1, src2);  in bpf_cpumask_intersects()
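The kernel/bpf/ hit is the thinnest consumer of all: kernel/bpf/cpumask.c exposes the test to BPF programs as a kfunc that simply forwards to cpumask_intersects(). Sketched from memory, so treat the exact annotation as approximate:

    /* Roughly what the kfunc around cpumask.c:319 looks like - a direct pass-through. */
    __bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1,
                                            const struct cpumask *src2)
    {
            return cpumask_intersects(src1, src2);
    }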
/linux-6.12.1/lib/

  group_cpus.c
      92  if (cpumask_intersects(mask, node_to_cpumask[n])) {  in get_nodes_in_cpumask()

/linux-6.12.1/arch/arm/kernel/

  hw_breakpoint.c
    1108  if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {  in reset_ctrl_regs()
    1128  if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {  in reset_ctrl_regs()

/linux-6.12.1/kernel/sched/

  topology.c
      85  cpumask_intersects(groupmask, sched_group_span(group))) {  in sched_domain_debug_one()
    1365  if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))  in asym_cpu_capacity_classify()
    1367  else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))  in asym_cpu_capacity_classify()
    2384  cpumask_intersects(tl->mask(cpu), tl->mask(i)))  in topology_span_sane()

  deadline.c
    2900  if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {  in set_cpus_allowed_dl()

/linux-6.12.1/drivers/hwmon/

  coretemp.c
     716  if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))  in coretemp_cpu_online()

/linux-6.12.1/include/linux/

  cpumask.h
     725  bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)  in cpumask_intersects() function
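This is the definition site every hit above resolves to: cpumask_intersects() answers whether two masks share at least one CPU. The body is a thin wrapper around the generic bitmap helper; the sketch below is from memory, and the bit-length constant in particular (small_cpumask_bits here, nr_cpumask_bits in older kernels) should be checked against the tree rather than trusted.

    /* Sketch of the inline near include/linux/cpumask.h:725 - true iff the masks share a set bit. */
    static __always_inline
    bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
    {
            return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
                                     small_cpumask_bits);
    }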
/linux-6.12.1/arch/x86/kernel/cpu/resctrl/

  pseudo_lock.c
     867  if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))  in rdtgroup_pseudo_locked_in_hierarchy()

/linux-6.12.1/arch/powerpc/platforms/cell/spufs/

  sched.c
     160  if (cpumask_intersects(mask, &ctx->cpus_allowed))  in __node_allowed()

/linux-6.12.1/kernel/

  padata.c
     703  if (!cpumask_intersects(cpumask, cpu_online_mask)) {  in padata_validate_cpumask()

  workqueue.c
    2697  if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))  in unbind_worker()
    5182  if (!cpumask_intersects(attrs->__pod_cpumask, wq_online_cpumask)) {  in wq_calc_pod_cpumask()
    7671  if (!cpumask_intersects(wq_unbound_cpumask, mask)) {  in restrict_unbound_cpumask()

/linux-6.12.1/kernel/trace/

  trace_events_filter.c
     661  return cpumask_intersects(mask, cmp);  in do_filter_cpumask()

/linux-6.12.1/drivers/vdpa/vdpa_user/

  vduse_dev.c
    1496  if (!cpumask_intersects(new_value, cpu_online_mask))  in irq_cb_affinity_store()