
Searched refs:nr_cpu_ids (Results 1 – 25 of 322) sorted by relevance


/linux-6.12.1/kernel/
watchdog_buddy.c
16 if (next_cpu >= nr_cpu_ids) in watchdog_next_cpu()
20 return nr_cpu_ids; in watchdog_next_cpu()
51 if (next_cpu < nr_cpu_ids) in watchdog_hardlockup_enable()
75 if (next_cpu < nr_cpu_ids) in watchdog_hardlockup_disable()
102 if (next_cpu >= nr_cpu_ids) in watchdog_buddy_check_hardlockup()
scftorture.c
343 cpu = torture_random(trsp) % nr_cpu_ids; in scftorture_invoke_one()
350 cpu = torture_random(trsp) % nr_cpu_ids; in scftorture_invoke_one()
373 cpu = torture_random(trsp) % nr_cpu_ids; in scftorture_invoke_one()
456 cpu = scfp->cpu % nr_cpu_ids; in scftorture_invoker()
466 WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids, in scftorture_invoker()
468 __func__, scfp->cpu, curcpu, nr_cpu_ids); in scftorture_invoker()
565 weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
566 weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
567 weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
568 weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
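The scftorture hits above all lean on the same trick: reducing a random value modulo nr_cpu_ids always yields an id in the valid range [0, nr_cpu_ids), though not necessarily an online CPU. A minimal userspace sketch of the idiom, with rand() standing in for torture_random() and a hard-coded count standing in for the kernel's runtime nr_cpu_ids (both are assumptions for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's boot-time nr_cpu_ids (assumed value). */
static const unsigned int nr_cpu_ids = 8;

int main(void)
{
	/* The modulo keeps the id inside [0, nr_cpu_ids) no matter what. */
	unsigned int cpu = (unsigned int)rand() % nr_cpu_ids;

	printf("picked cpu %u of %u ids\n", cpu, nr_cpu_ids);
	return 0;
}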
smp.c
269 if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu)) in csd_lock_wait_toolong()
439 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { in generic_exec_single()
761 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
825 if (cpu < nr_cpu_ids) in smp_call_function_many_cond()
968 if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids) in nrcpus()
989 unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
990 EXPORT_SYMBOL(nr_cpu_ids);
1145 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in smp_call_on_cpu()
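The generic_exec_single() hit (line 439) shows a common kernel validation pattern: casting a possibly-negative cpu to unsigned before comparing against nr_cpu_ids, so a single comparison rejects both negative and out-of-range values. A standalone sketch of that check, with nr_cpu_ids hard-coded as an assumption:

#include <stdbool.h>
#include <stdio.h>

static const unsigned int nr_cpu_ids = 8; /* assumption: runtime value in the kernel */

/*
 * A negative cpu becomes a huge unsigned value after the cast,
 * so one comparison covers both "cpu < 0" and "cpu too large".
 */
static bool cpu_id_valid(int cpu)
{
	return (unsigned int)cpu < nr_cpu_ids;
}

int main(void)
{
	printf("%d %d %d\n", cpu_id_valid(-1), cpu_id_valid(3), cpu_id_valid(8));
	return 0;
}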
/linux-6.12.1/lib/
cpumask_kunit.c
44 for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
68 KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask), in test_cpumask_weight()
70 KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all)); in test_cpumask_weight()
75 KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty)); in test_cpumask_first()
79 KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask), in test_cpumask_first()
87 KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask), in test_cpumask_last()
94 KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask), in test_cpumask_next()
97 KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty), in test_cpumask_next()
cpumask.c
152 WARN_ON(cpu >= nr_cpu_ids); in cpumask_local_spread()
179 if (next < nr_cpu_ids) in cpumask_any_and_distribute()
199 if (next < nr_cpu_ids) in cpumask_any_distribute()
objpool.c
53 for (i = 0; i < nr_cpu_ids; i++) { in objpool_init_percpu_slots()
112 for (i = 0; i < nr_cpu_ids; i++) in objpool_fini_percpu_slots()
145 slot_size = nr_cpu_ids * sizeof(struct objpool_slot); in objpool_init()
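objpool (like powerpc's paca.c below) sizes its per-CPU arrays by nr_cpu_ids rather than NR_CPUS, so allocations track the number of possible CPUs found at boot instead of the compile-time maximum. A hedged userspace sketch of the sizing pattern; the struct here is an illustrative stand-in, not the kernel's struct objpool_slot:

#include <stdlib.h>
#include <string.h>

static const unsigned int nr_cpu_ids = 8; /* assumption: boot-time value */

struct slot { long head, tail; }; /* illustrative stand-in */

int main(void)
{
	/* One slot per possible CPU id, mirroring how objpool_init() sizes it. */
	size_t slot_size = nr_cpu_ids * sizeof(struct slot);
	struct slot *slots = malloc(slot_size);

	if (!slots)
		return 1;
	memset(slots, 0, slot_size);
	free(slots);
	return 0;
}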
/linux-6.12.1/tools/sched_ext/
scx_central.bpf.c
59 const volatile u32 nr_cpu_ids = 1; /* !0 for veristat, set during init */ variable
193 bpf_for(cpu, 0, nr_cpu_ids) { in BPF_STRUCT_OPS()
200 gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
231 gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
246 u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
254 u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
272 bpf_for(i, 0, nr_cpu_ids) { in central_timerfn()
273 s32 cpu = (nr_timers + i) % nr_cpu_ids; in central_timerfn()
280 started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); in central_timerfn()
scx_central.c
60 skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); in main()
80 RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids); in main()
81 RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids); in main()
96 cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids); in main()
102 skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1); in main()
/linux-6.12.1/arch/powerpc/kernel/
paca.c
62 size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE); in alloc_shared_lppaca()
245 paca_nr_cpu_ids = nr_cpu_ids; in allocate_paca_ptrs()
247 paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; in allocate_paca_ptrs()
291 new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; in free_unused_pacas()
296 paca_nr_cpu_ids = nr_cpu_ids; in free_unused_pacas()
309 paca_ptrs_size + paca_struct_size, nr_cpu_ids); in free_unused_pacas()
setup-common.c
325 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) in show_cpuinfo()
337 if ((*pos) < nr_cpu_ids) in c_start()
418 for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) { in assign_threads()
460 cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32), in smp_setup_cpu_maps()
464 __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32)); in smp_setup_cpu_maps()
509 } else if (cpu >= nr_cpu_ids) { in smp_setup_cpu_maps()
514 if (cpu < nr_cpu_ids) in smp_setup_cpu_maps()
548 if (maxcpus > nr_cpu_ids) { in smp_setup_cpu_maps()
552 maxcpus, nr_cpu_ids); in smp_setup_cpu_maps()
553 maxcpus = nr_cpu_ids; in smp_setup_cpu_maps()
[all …]
/linux-6.12.1/arch/x86/kernel/cpu/
topology.c
288 if (apic_id != topo_info.boot_cpu_apic_id && topo_info.nr_assigned_cpus >= nr_cpu_ids) { in topology_register_apic()
289 pr_warn_once("CPU limit of %d reached. Ignoring further CPUs\n", nr_cpu_ids); in topology_register_apic()
429 unsigned int possible = nr_cpu_ids; in topology_apply_cmdline_limits_early()
438 if (possible < nr_cpu_ids) { in topology_apply_cmdline_limits_early()
480 if (WARN_ON_ONCE(assigned > nr_cpu_ids)) { in topology_init_possible_cpus()
481 disabled += assigned - nr_cpu_ids; in topology_init_possible_cpus()
482 assigned = nr_cpu_ids; in topology_init_possible_cpus()
484 allowed = min_t(unsigned int, total, nr_cpu_ids); in topology_init_possible_cpus()
/linux-6.12.1/arch/arm/mach-spear/
platsmp.c
102 if (ncores > nr_cpu_ids) { in spear13xx_smp_init_cpus()
104 ncores, nr_cpu_ids); in spear13xx_smp_init_cpus()
105 ncores = nr_cpu_ids; in spear13xx_smp_init_cpus()
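spear13xx here, and bcm63xx and omap4 further down, all clamp the core count reported by the hardware to nr_cpu_ids, warning when the two disagree. A minimal sketch of that clamp, with pr_warn() replaced by fprintf() so it builds standalone (the message text is illustrative):

#include <stdio.h>

static const unsigned int nr_cpu_ids = 4; /* assumption: fewer ids than cores */

static unsigned int clamp_ncores(unsigned int ncores)
{
	if (ncores > nr_cpu_ids) {
		fprintf(stderr,
			"SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}
	return ncores;
}

int main(void)
{
	printf("using %u cores\n", clamp_ncores(8));
	return 0;
}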
/linux-6.12.1/include/linux/
cpumask.h
25 #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
28 #define nr_cpu_ids ((unsigned int)NR_CPUS) macro
30 extern unsigned int nr_cpu_ids;
36 WARN_ON(nr != nr_cpu_ids); in set_nr_cpu_ids()
38 nr_cpu_ids = nr; in set_nr_cpu_ids()
70 #define small_cpumask_bits nr_cpu_ids
73 #define small_cpumask_bits nr_cpu_ids
74 #define large_cpumask_bits nr_cpu_ids
76 #define nr_cpumask_bits nr_cpu_ids
1216 nr_cpu_ids); in cpumap_print_to_pagebuf()
[all …]
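The cpumask.h hits show why nr_cpu_ids is cheap on small configurations: when the CPU count is fixed at build time it is a compile-time constant the compiler can fold, and otherwise it is a variable set once during boot via set_nr_cpu_ids() and read-only afterwards. A condensed sketch of that dual definition; the exact preprocessor condition is simplified from the real Kconfig plumbing:

/* Condensed from the pattern in include/linux/cpumask.h. */
#define NR_CPUS 64

#if NR_CPUS == 1
/* Fixed-count build: the id count is a constant the compiler can fold. */
#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
/* SMP build: a variable, set once while booting and read-only after. */
static unsigned int nr_cpu_ids = NR_CPUS;

static inline void set_nr_cpu_ids(unsigned int nr)
{
	nr_cpu_ids = nr;
}
#endif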
/linux-6.12.1/arch/arm/mach-bcm/
bcm63xx_smp.c
64 if (ncores > nr_cpu_ids) { in scu_a9_enable()
66 ncores, nr_cpu_ids); in scu_a9_enable()
67 ncores = nr_cpu_ids; in scu_a9_enable()
/linux-6.12.1/arch/arm/kernel/
devtree.c
129 if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than " in arm_dt_init_cpu_maps()
131 cpuidx, nr_cpu_ids)) { in arm_dt_init_cpu_maps()
132 cpuidx = nr_cpu_ids; in arm_dt_init_cpu_maps()
/linux-6.12.1/arch/riscv/kernel/
smpboot.c
153 if (cpuid > nr_cpu_ids) in of_parse_and_init_cpus()
155 cpuid, nr_cpu_ids); in of_parse_and_init_cpus()
169 for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) in setup_smp()
acpi_numa.c
42 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_cpu_for_acpi_id()
100 for (i = 0; i < nr_cpu_ids; i++) in acpi_map_cpus_to_nodes()
/linux-6.12.1/scripts/gdb/linux/
timerlist.py
152 nr_cpu_ids = 1
154 nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")
158 num_bytes = (nr_cpu_ids + 7) / 8
174 extra = nr_cpu_ids % 8
/linux-6.12.1/kernel/irq/
ipi.c
70 if (next < nr_cpu_ids) in irq_reserve_ipi()
72 if (next < nr_cpu_ids) { in irq_reserve_ipi()
168 if (!data || cpu >= nr_cpu_ids) in ipi_get_hwirq()
199 if (cpu >= nr_cpu_ids) in ipi_send_verify()
/linux-6.12.1/net/netfilter/
nf_flow_table_procfs.c
14 for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) { in nf_flow_table_cpu_seq_start()
29 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in nf_flow_table_cpu_seq_next()
/linux-6.12.1/arch/x86/kernel/apic/
probe_32.c
98 if (nr_cpu_ids <= 8 || xen_pv_domain()) in x86_32_probe_bigsmp_early()
121 if (nr_cpu_ids > 8 && !xen_pv_domain()) in x86_32_install_bigsmp()
/linux-6.12.1/drivers/perf/
arm_pmu_platform.c
87 cpu = nr_cpu_ids; in pmu_parse_irq_affinity()
123 if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node)) in pmu_parse_irqs()
141 if (cpu >= nr_cpu_ids) in pmu_parse_irqs()
/linux-6.12.1/arch/arm/mach-omap2/
omap-smp.c
278 if (ncores > nr_cpu_ids) { in omap4_smp_init_cpus()
280 ncores, nr_cpu_ids); in omap4_smp_init_cpus()
281 ncores = nr_cpu_ids; in omap4_smp_init_cpus()
/linux-6.12.1/arch/x86/xen/
smp_pv.c
156 for (i = 0; i < nr_cpu_ids; i++) in xen_pv_smp_config()
215 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) in xen_pv_smp_prepare_cpus()
414 for (cpus = 0; cpus < nr_cpu_ids; cpus++) { in xen_smp_count_cpus()
420 if (cpus < nr_cpu_ids) in xen_smp_count_cpus()
/linux-6.12.1/kernel/sched/
isolation.c
46 if (cpu < nr_cpu_ids) in housekeeping_any_cpu()
50 if (likely(cpu < nr_cpu_ids)) in housekeeping_any_cpu()
143 if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) { in housekeeping_setup()
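housekeeping_any_cpu() relies on the cpumask convention visible throughout these results: search helpers such as cpumask_first() and cpumask_any_and() return nr_cpu_ids when no bit is set, so `cpu < nr_cpu_ids` is the success test. A small sketch of that convention over a plain bitmask; the helper is a simplified stand-in, not the kernel's implementation:

#include <stdio.h>

static const unsigned int nr_cpu_ids = 8; /* assumption */

/* Simplified stand-in for cpumask_first(): returns nr_cpu_ids if empty. */
static unsigned int mask_first(unsigned long mask)
{
	for (unsigned int cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return nr_cpu_ids; /* the "no CPU found" sentinel */
}

int main(void)
{
	unsigned int cpu = mask_first(0x10);

	if (cpu < nr_cpu_ids)
		printf("found cpu %u\n", cpu);
	else
		printf("mask empty\n");
	return 0;
}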
