/linux-6.12.1/tools/testing/selftests/kvm/rseq_test.c
     40  static int min_cpu, max_cpu;    [variable]
     63  if (cpu > max_cpu) {    [in next_cpu()]
    180  max_cpu = -1;    [in calc_min_max_cpu()]
    188  max_cpu = i;    [in calc_min_max_cpu()]

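The selftest derives the lowest and highest usable CPU numbers from the CPUs the task is allowed to run on. Below is a minimal user-space sketch of that scan, assuming the calling thread's affinity mask is the input; it only illustrates the idea behind calc_min_max_cpu() and is not the test's actual code.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Scan the calling thread's affinity mask and report the lowest and
     * highest allowed CPU numbers, or -1/-1 if nothing is set. */
    static void calc_min_max(int *min_cpu, int *max_cpu)
    {
        cpu_set_t mask;
        int cpu;

        *min_cpu = -1;
        *max_cpu = -1;
        if (sched_getaffinity(0, sizeof(mask), &mask))
            return;

        for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
            if (!CPU_ISSET(cpu, &mask))
                continue;
            if (*min_cpu < 0)
                *min_cpu = cpu;
            if (cpu > *max_cpu)
                *max_cpu = cpu;
        }
    }

    int main(void)
    {
        int min_cpu, max_cpu;

        calc_min_max(&min_cpu, &max_cpu);
        printf("min_cpu=%d max_cpu=%d\n", min_cpu, max_cpu);
        return 0;
    }
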
/linux-6.12.1/drivers/xen/pcpu.c
    274  static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)    [in sync_pcpu(), argument]
    290  if (max_cpu)    [in sync_pcpu()]
    291  *max_cpu = info->max_present;    [in sync_pcpu()]
    321  uint32_t cpu = 0, max_cpu = 0;    [in xen_sync_pcpus(), local]
    327  while (!err && (cpu <= max_cpu)) {    [in xen_sync_pcpus()]
    328  err = sync_pcpu(cpu, &max_cpu);    [in xen_sync_pcpus()]

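xen_sync_pcpus() walks CPU ids from 0 upward while each sync_pcpu() call refreshes the upper bound from the hypervisor-reported max_present value, so the loop bound can grow as the walk proceeds. The sketch below shows that moving-upper-bound loop in plain user-space C; query_cpu() is a hypothetical stand-in for the hypercall and simply reports a fixed bound.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the per-CPU query: like the Xen call, it also returns
     * the highest CPU id currently known to exist. */
    static int query_cpu(uint32_t cpu, uint32_t *max_cpu)
    {
        const uint32_t max_present = 7;    /* pretend 8 possible slots */

        if (max_cpu)
            *max_cpu = max_present;
        return cpu <= max_present ? 0 : -1;
    }

    int main(void)
    {
        uint32_t cpu = 0, max_cpu = 0;
        int err = 0;

        /* The bound is re-read on every iteration, so it may grow as we go. */
        while (!err && cpu <= max_cpu) {
            err = query_cpu(cpu, &max_cpu);
            cpu++;
        }
        printf("walked %u slots, highest id %u\n",
               (unsigned int)cpu, (unsigned int)max_cpu);
        return 0;
    }
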
/linux-6.12.1/arch/s390/kernel/topology.c
    100  unsigned int max_cpu;    [in cpu_thread_map(), local]
    109  max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);    [in cpu_thread_map()]
    110  for (; cpu <= max_cpu; cpu++) {    [in cpu_thread_map()]
    129  unsigned int max_cpu, rcore;    [in add_cpus_to_mask(), local]
    136  max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);    [in add_cpus_to_mask()]
    137  for (; cpu <= max_cpu; cpu++) {    [in add_cpus_to_mask()]

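Both helpers compute the last thread id of a core as the core's first CPU plus smp_cpu_mtid, clamped with min() so the loop never walks past nr_cpu_ids - 1. A user-space sketch of that clamp-then-iterate pattern follows; nr_cpu_ids and the mtid value are illustrative constants, not values read from the machine.

    #include <stdio.h>

    #define NR_CPU_IDS    16    /* illustrative stand-in for nr_cpu_ids */
    #define SMP_CPU_MTID   3    /* illustrative: 3 extra thread ids per core */

    static unsigned int min_uint(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* Visit every thread id of the core starting at 'cpu', without running
     * past the last valid CPU id. */
    static void walk_core_threads(unsigned int cpu)
    {
        unsigned int max_cpu = min_uint(cpu + SMP_CPU_MTID, NR_CPU_IDS - 1);

        for (; cpu <= max_cpu; cpu++)
            printf("thread id %u\n", cpu);
    }

    int main(void)
    {
        walk_core_threads(4);     /* prints 4..7 */
        walk_core_threads(14);    /* clamped: prints 14..15 */
        return 0;
    }
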
/linux-6.12.1/kernel/sched/cpudeadline.c
    124  int cpu, max_cpu = -1;    [in cpudl_find(), local]
    139  max_cpu = cpu;    [in cpudl_find()]
    145  cpumask_set_cpu(max_cpu, later_mask);    [in cpudl_find()]

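The visible fragments show cpudl_find() scanning candidate CPUs, remembering a single best one in max_cpu, and finally shrinking later_mask to just that CPU with cpumask_set_cpu(). The selection criterion itself is not visible in these hits, so the sketch below only illustrates the scan-and-narrow pattern; the per-CPU score table is purely illustrative and is not the scheduler's logic.

    #include <stdio.h>

    #define NCPUS 4

    /* Illustrative per-CPU score; the real choice is driven by scheduler
     * state, not a static table. */
    static const int score[NCPUS] = { 3, 9, 1, 7 };

    int main(void)
    {
        unsigned int candidates = 0xb;    /* bitmask of CPUs 0, 1 and 3 */
        int cpu, max_cpu = -1;

        /* Scan the candidate mask and remember the best CPU seen so far. */
        for (cpu = 0; cpu < NCPUS; cpu++) {
            if (!(candidates & (1u << cpu)))
                continue;
            if (max_cpu < 0 || score[cpu] > score[max_cpu])
                max_cpu = cpu;
        }

        /* Narrow the result mask down to that single CPU. */
        if (max_cpu >= 0)
            candidates = 1u << max_cpu;
        printf("picked CPU %d, mask now 0x%x\n", max_cpu, candidates);
        return 0;
    }
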
/linux-6.12.1/kernel/sched/topology.c
   1305  int cpu, cores = 0, max_cpu = -1;    [in init_sched_groups_capacity(), local]
   1322  if (max_cpu < 0)    [in init_sched_groups_capacity()]
   1323  max_cpu = cpu;    [in init_sched_groups_capacity()]
   1324  else if (sched_asym_prefer(cpu, max_cpu))    [in init_sched_groups_capacity()]
   1325  max_cpu = cpu;    [in init_sched_groups_capacity()]
   1327  sg->asym_prefer_cpu = max_cpu;    [in init_sched_groups_capacity()]

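init_sched_groups_capacity() reduces a group's CPUs to a single asym_prefer_cpu: the first CPU seeds max_cpu, and each later CPU replaces it only when sched_asym_prefer() says it should be preferred. Here is a sketch of that pairwise-reduction pattern; asym_prefer() is a stand-in comparator over an illustrative priority table, not the kernel's implementation.

    #include <stdbool.h>
    #include <stdio.h>

    #define NCPUS 4

    /* Illustrative per-CPU priority; the kernel derives this from arch data. */
    static const int prio[NCPUS] = { 10, 30, 20, 30 };

    /* Stand-in for sched_asym_prefer(): should 'a' be preferred over 'b'? */
    static bool asym_prefer(int a, int b)
    {
        return prio[a] > prio[b];
    }

    int main(void)
    {
        int cpu, max_cpu = -1;

        for (cpu = 0; cpu < NCPUS; cpu++) {
            if (max_cpu < 0)
                max_cpu = cpu;    /* first CPU seeds the choice */
            else if (asym_prefer(cpu, max_cpu))
                max_cpu = cpu;    /* strictly preferred, take it */
        }
        /* Ties keep the earlier CPU, matching the strict comparison above. */
        printf("asym_prefer_cpu = %d\n", max_cpu);
        return 0;
    }
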
/linux-6.12.1/drivers/infiniband/hw/hfi1/affinity.c
    287  int max_cpu;    [in per_cpu_affinity_put_max(), local]
    297  max_cpu = cpumask_first(possible_cpumask);    [in per_cpu_affinity_put_max()]
    298  if (max_cpu >= nr_cpu_ids)    [in per_cpu_affinity_put_max()]
    301  prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);    [in per_cpu_affinity_put_max()]
    306  max_cpu = curr_cpu;    [in per_cpu_affinity_put_max()]
    311  *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;    [in per_cpu_affinity_put_max()]
    313  return max_cpu;    [in per_cpu_affinity_put_max()]

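per_cpu_affinity_put_max() appears to pick the CPU whose per-CPU completion-vector counter is highest, decrement that counter, and return the CPU, i.e. it releases one reference from the most-loaded CPU. The following user-space sketch shows that find-max-then-decrement idea over a plain array; the per_cpu/cpumask machinery is deliberately not reproduced.

    #include <stdio.h>

    #define NCPUS 4

    /* Pick the index with the largest counter, decrement it, and return it;
     * returns -1 if there are no CPUs to choose from. */
    static int put_max(int *cntr, int ncpus)
    {
        int cpu, max_cpu;

        if (ncpus <= 0)
            return -1;

        max_cpu = 0;
        for (cpu = 1; cpu < ncpus; cpu++) {
            if (cntr[cpu] > cntr[max_cpu])
                max_cpu = cpu;
        }
        cntr[max_cpu] -= 1;    /* drop one reference from the busiest CPU */
        return max_cpu;
    }

    int main(void)
    {
        int cntr[NCPUS] = { 2, 5, 3, 5 };
        int cpu = put_max(cntr, NCPUS);

        printf("released CPU %d, its counter is now %d\n", cpu, cntr[cpu]);
        return 0;
    }
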
/linux-6.12.1/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh
    454  local max_cpu=""
    459  max_cpu=$(($(nproc) - 1))
    460  RANDOM_CPU=$(shuf -i 0-$max_cpu -n 1)

/linux-6.12.1/tools/perf/scripts/python/sched-migration.py
    321  max_cpu = 0
    323  if cpu > max_cpu:
    324  max_cpu = cpu
    325  return max_cpu

/linux-6.12.1/arch/sparc/kernel/mdesc.c
    801  int max_cpu, i;    [in report_platform_properties(), local]
    804  max_cpu = *v;    [in report_platform_properties()]
    805  if (max_cpu > NR_CPUS)    [in report_platform_properties()]
    806  max_cpu = NR_CPUS;    [in report_platform_properties()]
    808  max_cpu = NR_CPUS;    [in report_platform_properties()]
    810  for (i = 0; i < max_cpu; i++)    [in report_platform_properties()]

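report_platform_properties() reads a CPU count from the machine description, clamps it to NR_CPUS before iterating, and falls back to NR_CPUS when the property is missing. A short sketch of that trust-but-clamp pattern; the MDESC parsing is out of scope and the reported values below are illustrative.

    #include <stdio.h>

    #define NR_CPUS 64    /* illustrative compile-time cap */

    /* Clamp a firmware-reported CPU count to the compile-time limit;
     * a negative value means "property missing", so use the cap itself. */
    static int clamp_max_cpu(long reported)
    {
        if (reported < 0)
            return NR_CPUS;
        return reported > NR_CPUS ? NR_CPUS : (int)reported;
    }

    int main(void)
    {
        printf("%d\n", clamp_max_cpu(48));     /* within range: 48 */
        printf("%d\n", clamp_max_cpu(512));    /* too large:    64 */
        printf("%d\n", clamp_max_cpu(-1));     /* missing:      64 */
        return 0;
    }
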
/linux-6.12.1/tools/perf/builtin-sched.c
    181  struct perf_cpu max_cpu;    [member]
   1645  if (this_cpu.cpu > sched->max_cpu.cpu)    [in map_switch_event()]
   1646  sched->max_cpu = this_cpu;    [in map_switch_event()]
   1655  cpus_nr = sched->max_cpu.cpu;    [in map_switch_event()]
   2071  u32 ncpus = sched->max_cpu.cpu + 1;    [in timehist_header()]
   2156  u32 max_cpus = sched->max_cpu.cpu + 1;    [in timehist_print_sample()]
   2570  printf(" %*s ", sched->max_cpu.cpu + 1, "");    [in timehist_print_wakeup_event()]
   2636  max_cpus = sched->max_cpu.cpu + 1;    [in timehist_print_migration_event()]
   3111  printf(" (x %d)\n", sched->max_cpu.cpu);    [in timehist_print_summary()]
   3132  if (this_cpu.cpu > sched->max_cpu.cpu)    [in perf_timehist__process_sample()]
   [all …]

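perf sched keeps the highest CPU number it has observed in sched->max_cpu and later derives per-CPU display counts and widths from max_cpu.cpu + 1. A compact sketch of that running-maximum-then-size pattern over a stream of sample CPUs; the sample values are illustrative.

    #include <stdio.h>

    int main(void)
    {
        /* CPU numbers as they might arrive with successive samples. */
        const int samples[] = { 2, 0, 5, 3, 5, 1 };
        const int nr = sizeof(samples) / sizeof(samples[0]);
        int i, max_cpu = -1;

        /* Track the highest CPU index ever seen. */
        for (i = 0; i < nr; i++) {
            if (samples[i] > max_cpu)
                max_cpu = samples[i];
        }

        /* Column count for the per-CPU display: indices 0..max_cpu. */
        printf("ncpus for display: %d\n", max_cpu + 1);
        return 0;
    }
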
/linux-6.12.1/kernel/rcu/tree_stall.h
   1027  int max_cpu = -1;    [in rcu_fwd_progress_check(), local]
   1046  if (max_cpu < 0)    [in rcu_fwd_progress_check()]
   1052  max_cpu = cpu;    [in rcu_fwd_progress_check()]
   1054  if (max_cpu >= 0)    [in rcu_fwd_progress_check()]

/linux-6.12.1/tools/perf/arch/x86/tests/intel-pt-test.c
    438  int max_cpu = cpu__max_cpu().cpu;    [in test__intel_pt_hybrid_compat(), local]
    452  for (cpu = 1, last_caps = caps0; cpu < max_cpu; cpu++) {    [in test__intel_pt_hybrid_compat()]

/linux-6.12.1/arch/s390/include/asm/cpu_mf.h
     51  u16 max_cpu;    [member]

/linux-6.12.1/tools/perf/util/synthetic-events.c
   1239  int max_cpu;    [member]
   1261  data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);    [in synthesize_mask()]
   1277  data->data->range_cpu_data.end_cpu = data->max_cpu;    [in synthesize_range_cpus()]
   1289  syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;    [in cpu_map_data__alloc()]
   1290  if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {    [in cpu_map_data__alloc()]
   1301  BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);    [in cpu_map_data__alloc()]

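cpu_map_data__alloc() chooses between two encodings of a CPU map: when the CPUs form one contiguous run (max - min + 1 equals the number of entries, ignoring the "any CPU" placeholder) it can be synthesized as a start/end range, otherwise it falls back to a 32-bit bitmask whose length comes from BITS_TO_U32(). The sketch below shows that decision with local stand-ins; the mask size here simply covers bits 0..max and does not reproduce perf's actual cpu_map_data layout.

    #include <stdio.h>

    /* 32-bit words needed to hold 'bits' bits (same idea as BITS_TO_U32()). */
    static unsigned int bits_to_u32(unsigned int bits)
    {
        return (bits + 31) / 32;
    }

    /* Decide how to encode a sorted CPU list: as a [min, max] range when the
     * ids are contiguous, otherwise as a bitmask covering ids 0..max. */
    static void choose_encoding(const int *cpus, int nr)
    {
        int min_cpu = cpus[0];
        int max_cpu = cpus[nr - 1];

        if (max_cpu - min_cpu + 1 == nr)
            printf("range encoding: start=%d end=%d\n", min_cpu, max_cpu);
        else
            printf("mask encoding: %u x 32-bit words\n",
                   bits_to_u32((unsigned int)max_cpu + 1));
    }

    int main(void)
    {
        const int contiguous[] = { 4, 5, 6, 7 };
        const int sparse[] = { 0, 2, 3, 64 };

        choose_encoding(contiguous, 4);    /* contiguous: range 4-7 */
        choose_encoding(sparse, 4);        /* sparse: 3 words of mask */
        return 0;
    }
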
/linux-6.12.1/tools/perf/util/cs-etm.c
   3346  int num_cpu, max_cpu = 0;    [in cs_etm__process_auxtrace_info_full(), local]
   3394  if ((int) metadata[j][CS_ETM_CPU] > max_cpu)    [in cs_etm__process_auxtrace_info_full()]
   3395  max_cpu = metadata[j][CS_ETM_CPU];    [in cs_etm__process_auxtrace_info_full()]
   3425  err = auxtrace_queues__init_nr(&etm->queues, max_cpu + 1);    [in cs_etm__process_auxtrace_info_full()]

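cs_etm__process_auxtrace_info_full() scans the per-trace metadata for the largest CPU number and then sets up max_cpu + 1 queues, so the queue array is indexed by CPU id rather than sized by the count of traced CPUs. A small sketch of that size-by-highest-id pattern; the metadata values are illustrative.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* CPU ids pulled out of per-trace metadata; note the gap before 6. */
        const int metadata_cpu[] = { 0, 1, 6 };
        const int num_cpu = sizeof(metadata_cpu) / sizeof(metadata_cpu[0]);
        int j, max_cpu = 0;
        int *queues;

        for (j = 0; j < num_cpu; j++) {
            if (metadata_cpu[j] > max_cpu)
                max_cpu = metadata_cpu[j];
        }

        /* Index queues directly by CPU id, so allocate max_cpu + 1 slots. */
        queues = calloc(max_cpu + 1, sizeof(*queues));
        if (!queues)
            return 1;
        printf("allocated %d queues for %d traced CPUs\n", max_cpu + 1, num_cpu);
        free(queues);
        return 0;
    }
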
/linux-6.12.1/drivers/net/ethernet/marvell/mvneta.c
   1479  int max_cpu = num_present_cpus();    [in mvneta_defaults_set(), local]
   1501  if ((rxq % max_cpu) == cpu)    [in mvneta_defaults_set()]
   1505  if ((txq % max_cpu) == cpu)    [in mvneta_defaults_set()]
   4332  int elected_cpu = 0, max_cpu, cpu;    [in mvneta_percpu_elect(), local]
   4340  max_cpu = num_present_cpus();    [in mvneta_percpu_elect()]
   4347  if ((rxq % max_cpu) == cpu)    [in mvneta_percpu_elect()]

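Both mvneta_defaults_set() and mvneta_percpu_elect() spread queues across CPUs with a plain modulo rule: queue q is handled by CPU (q % num_present_cpus()). The sketch below prints which RX queues each CPU would own under that rule; the queue and CPU counts are illustrative, not the driver's configuration.

    #include <stdio.h>

    int main(void)
    {
        const int rxq_number = 8;    /* illustrative queue count */
        const int max_cpu = 3;       /* stand-in for num_present_cpus() */
        int cpu, rxq;

        /* Each CPU owns every queue whose index is congruent to it mod max_cpu. */
        for (cpu = 0; cpu < max_cpu; cpu++) {
            printf("cpu %d:", cpu);
            for (rxq = 0; rxq < rxq_number; rxq++) {
                if ((rxq % max_cpu) == cpu)
                    printf(" rxq%d", rxq);
            }
            printf("\n");
        }
        return 0;
    }
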