Searched refs:cpu_cnt (Results 1 – 12 of 12) sorted by relevance
/linux-6.12.1/block/
  blk-cgroup-rwstat.h
        27  struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];  member
        39  percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);  in blkg_rwstat_read_counter()
        67  cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];  in blkg_rwstat_add()
        69  cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];  in blkg_rwstat_add()
        71  cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];  in blkg_rwstat_add()
        76  cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];  in blkg_rwstat_add()
        78  cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];  in blkg_rwstat_add()
        96  percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);  in blkg_rwstat_read()
       124  percpu_counter_set(&rwstat->cpu_cnt[i], 0);  in blkg_rwstat_reset()
       143  sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);  in blkg_rwstat_add_aux()

  blk-cgroup-rwstat.c
        12  ret = percpu_counter_init_many(rwstat->cpu_cnt, 0, gfp, BLKG_RWSTAT_NR);  in blkg_rwstat_init()
        24  percpu_counter_destroy_many(rwstat->cpu_cnt, BLKG_RWSTAT_NR);  in blkg_rwstat_exit()

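The two entries above show the whole lifecycle of the rwstat counters: a per-direction array of percpu_counters is initialized and destroyed in one call each, bumped cheaply on the hot path, and only summed across CPUs when read. A minimal kernel-context sketch of the same pattern (my_rwstat and the MY_STAT_* slots are made up for illustration; the real type is struct blkg_rwstat):

    #include <linux/gfp.h>
    #include <linux/percpu_counter.h>
    #include <linux/types.h>

    /* Hypothetical two-slot stat mirroring the cpu_cnt[] member above. */
    enum { MY_STAT_READ, MY_STAT_WRITE, MY_STAT_NR };

    struct my_rwstat {
            struct percpu_counter cpu_cnt[MY_STAT_NR];
    };

    static int my_rwstat_init(struct my_rwstat *s, gfp_t gfp)
    {
            /* One call sets up the whole array, as blkg_rwstat_init() does. */
            return percpu_counter_init_many(s->cpu_cnt, 0, gfp, MY_STAT_NR);
    }

    static void my_rwstat_account(struct my_rwstat *s, bool is_write, s64 bytes)
    {
            /* Hot path: a cheap add on the local CPU's counter. */
            percpu_counter_add(&s->cpu_cnt[is_write ? MY_STAT_WRITE : MY_STAT_READ],
                               bytes);
    }

    static s64 my_rwstat_read(struct my_rwstat *s, int idx)
    {
            /* Slow path: fold every CPU's delta together, clamped at zero. */
            return percpu_counter_sum_positive(&s->cpu_cnt[idx]);
    }

    static void my_rwstat_exit(struct my_rwstat *s)
    {
            percpu_counter_destroy_many(s->cpu_cnt, MY_STAT_NR);
    }
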
  bfq-cgroup.c
        23  ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);  in bfq_stat_init()
        33  percpu_counter_destroy(&stat->cpu_cnt);  in bfq_stat_exit()
        46  percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);  in bfq_stat_add()
        55  return percpu_counter_sum_positive(&stat->cpu_cnt);  in bfq_stat_read()
        64  percpu_counter_set(&stat->cpu_cnt, 0);  in bfq_stat_reset()

  bfq-iosched.h
       916  struct percpu_counter cpu_cnt;  member

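bfq-cgroup.c wraps the single percpu_counter declared in bfq-iosched.h the same way, but adds through percpu_counter_add_batch() so per-CPU deltas are folded into the shared count only once they exceed a large batch. A sketch of that add path (my_stat is hypothetical, and the batch value here is an assumption standing in for BLKG_STAT_CPU_BATCH):

    #include <linux/limits.h>
    #include <linux/percpu_counter.h>
    #include <linux/types.h>

    /* Assumed batch size; the BFQ code passes BLKG_STAT_CPU_BATCH here. */
    #define MY_STAT_BATCH   (INT_MAX / 2)

    struct my_stat {
            struct percpu_counter cpu_cnt;
    };

    static void my_stat_add(struct my_stat *stat, u64 val)
    {
            /*
             * A huge batch keeps nearly every addition on the local CPU's
             * counter; the shared atomic count is almost never touched.
             */
            percpu_counter_add_batch(&stat->cpu_cnt, val, MY_STAT_BATCH);
    }
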
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/
  get_branch_snapshot.c
         7  static int cpu_cnt;  variable
        46  cpu_cnt = libbpf_num_possible_cpus();  in create_perf_events()
        47  pfd_array = malloc(sizeof(int) * cpu_cnt);  in create_perf_events()
        49  cpu_cnt = 0;  in create_perf_events()
        53  for (cpu = 0; cpu < cpu_cnt; cpu++) {  in create_perf_events()
        67  for (cpu = 0; cpu < cpu_cnt; cpu++) {  in close_perf_events()

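The selftest sizes its fd array from libbpf_num_possible_cpus() and opens one perf event per possible CPU, resetting cpu_cnt to 0 on allocation failure so the close path stays safe. A userspace sketch of that pattern (the attr setup is trimmed; the real test configures branch-stack sampling):

    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>
    #include <bpf/libbpf.h>

    static int cpu_cnt;
    static int *pfd_array;

    static int create_perf_events(void)
    {
            struct perf_event_attr attr = {
                    .type = PERF_TYPE_HARDWARE,
                    .config = PERF_COUNT_HW_CPU_CYCLES,
                    .size = sizeof(attr),
            };
            int cpu;

            cpu_cnt = libbpf_num_possible_cpus();
            if (cpu_cnt < 0)
                    return cpu_cnt;
            pfd_array = malloc(sizeof(int) * cpu_cnt);
            if (!pfd_array) {
                    cpu_cnt = 0;    /* nothing for close_perf_events() to do */
                    return -1;
            }
            /* One event per possible CPU, pinned to that CPU (pid == -1). */
            for (cpu = 0; cpu < cpu_cnt; cpu++)
                    pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
                                             -1, cpu, -1, 0);
            return 0;
    }

    static void close_perf_events(void)
    {
            for (int cpu = 0; cpu < cpu_cnt; cpu++)
                    if (pfd_array[cpu] >= 0)
                            close(pfd_array[cpu]);
            free(pfd_array);
    }
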
/linux-6.12.1/tools/testing/selftests/bpf/benchs/
  bench_ringbufs.c
       470  int cpu_cnt; /* number of allocated CPU buffers */  member
       491  cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, -1);  in perfbuf_custom_consumer()

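The benchmark's custom consumer drains all per-CPU buffers through a single epoll instance, passing cpu_cnt as the maximum number of ready events per wakeup. A hypothetical consumer loop under that assumption (consume_all() is made up; only the epoll_wait() call mirrors the hit above):

    #include <sys/epoll.h>

    /* One epoll instance watching cpu_cnt per-CPU buffer fds. */
    static int consume_all(int epoll_fd, int cpu_cnt)
    {
            struct epoll_event events[cpu_cnt];
            int i, cnt;

            for (;;) {
                    /* Up to cpu_cnt fds can be ready at once; -1 blocks. */
                    cnt = epoll_wait(epoll_fd, events, cpu_cnt, -1);
                    if (cnt < 0)
                            return -1;
                    for (i = 0; i < cnt; i++) {
                            /* events[i].data says which CPU buffer is ready. */
                    }
            }
    }
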
/linux-6.12.1/tools/bpf/bpftool/
  map_perf_ring.c
       186  opts.cpu_cnt = ctx.all_cpus ? 0 : 1;  in do_event_pipe()

/linux-6.12.1/tools/power/x86/intel-speed-select/
  isst-config.c
       678  static int cpu_cnt[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE][MAX_PUNIT_PER_DIE];  variable
       703  return cpu_cnt[id->pkg][id->die][id->punit];  in get_cpu_count()
       810  cpu_cnt[pkg_id][die_id][punit_id]++;  in create_cpu_map()
       840  cpu_set_t *core_cpumask, int *cpu_cnt)  in set_cpu_mask_from_punit_coremask() argument
       847  *cpu_cnt = 0;  in set_cpu_mask_from_punit_coremask()
       867  *cpu_cnt = cnt;  in set_cpu_mask_from_punit_coremask()

  isst.h
       233  int *cpu_cnt);

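isst-config.c buckets CPUs into a package x die x punit array while scanning the topology, so later count queries are plain array indexing. A simplified sketch (the MAX_* bounds here are assumptions standing in for the tool's own constants):

    /* Assumed bounds; intel-speed-select defines its own MAX_* constants. */
    #define MAX_PKG         8
    #define MAX_DIE         2
    #define MAX_PUNIT       8

    static int cpu_cnt[MAX_PKG][MAX_DIE][MAX_PUNIT];

    /* Called once per CPU while enumerating the sysfs topology. */
    static void count_cpu(int pkg, int die, int punit)
    {
            cpu_cnt[pkg][die][punit]++;
    }

    static int get_cpu_count(int pkg, int die, int punit)
    {
            return cpu_cnt[pkg][die][punit];
    }
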
/linux-6.12.1/tools/lib/bpf/
  libbpf.c
     13077  int cpu_cnt;  member
     13102  int cpu_cnt; /* number of allocated CPU buffers */  member
     13130  for (i = 0; i < pb->cpu_cnt; i++) {  in perf_buffer__free()
     13250  p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);  in perf_buffer__new_raw()
     13319  if (p->cpu_cnt > 0) {  in __perf_buffer__new()
     13320  pb->cpu_cnt = p->cpu_cnt;  in __perf_buffer__new()
     13322  pb->cpu_cnt = libbpf_num_possible_cpus();  in __perf_buffer__new()
     13323  if (pb->cpu_cnt < 0) {  in __perf_buffer__new()
     13324  err = pb->cpu_cnt;  in __perf_buffer__new()
     13327  if (map.max_entries && map.max_entries < pb->cpu_cnt)  in __perf_buffer__new()
     [all …]

  libbpf.h
      1577  int cpu_cnt;  member

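Inside libbpf, __perf_buffer__new() treats a caller-supplied cpu_cnt > 0 as authoritative and otherwise falls back to one buffer per possible CPU, clamped to the perf-event map's max_entries. A simplified paraphrase of that decision, not the real libbpf internals:

    #include <bpf/libbpf.h>

    /* Paraphrase of the cpu_cnt resolution in __perf_buffer__new(). */
    static int resolve_cpu_cnt(int requested_cpu_cnt, unsigned int map_max_entries)
    {
            int cpu_cnt = requested_cpu_cnt;

            if (cpu_cnt <= 0) {
                    /* 0 (the OPTS_GET default) means one buffer per possible CPU. */
                    cpu_cnt = libbpf_num_possible_cpus();
                    if (cpu_cnt < 0)
                            return cpu_cnt;    /* negative errno from libbpf */
                    /* The perf-event map needs a slot per buffer, so clamp. */
                    if (map_max_entries && map_max_entries < (unsigned int)cpu_cnt)
                            cpu_cnt = map_max_entries;
            }
            return cpu_cnt;
    }

bpftool's do_event_pipe() above relies on the same convention: opts.cpu_cnt = 0 means "follow all CPUs", while 1 pins the event pipe to a single CPU.
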
/linux-6.12.1/drivers/scsi/lpfc/
  lpfc_init.c
     13013  unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;  in lpfc_sli4_enable_msix() local
     13026  cpu_cnt = cpumask_weight(aff_mask);  in lpfc_sli4_enable_msix()
     13027  vectors = min(phba->cfg_irq_chann, cpu_cnt);  in lpfc_sli4_enable_msix()

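The lpfc driver caps the number of MSI-X vectors it requests at the number of CPUs in the device's affinity mask. A kernel-context sketch of that clamp (pick_msix_vectors() and its parameters are hypothetical; cpumask_weight() and min() are the real kernel helpers):

    #include <linux/cpumask.h>
    #include <linux/minmax.h>

    /* Hypothetical helper: clamp requested IRQ channels to nearby CPUs. */
    static unsigned int pick_msix_vectors(unsigned int cfg_irq_chann,
                                          const struct cpumask *aff_mask)
    {
            unsigned int cpu_cnt = cpumask_weight(aff_mask);

            /* Requesting more vectors than servicing CPUs buys nothing. */
            return min(cfg_irq_chann, cpu_cnt);
    }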