Lines matching +full:local +full:-cap +full:-size in tools/perf/util/env.c

1 // SPDX-License-Identifier: GPL-2.0
21 #include "bpf-event.h"
22 #include "bpf-utils.h"
28 down_write(&env->bpf_progs.lock); in perf_env__insert_bpf_prog_info()
30 up_write(&env->bpf_progs.lock); in perf_env__insert_bpf_prog_info()
35 __u32 prog_id = info_node->info_linear->info.id; in __perf_env__insert_bpf_prog_info()
40 p = &env->bpf_progs.infos.rb_node; in __perf_env__insert_bpf_prog_info()
45 if (prog_id < node->info_linear->info.id) { in __perf_env__insert_bpf_prog_info()
46 p = &(*p)->rb_left; in __perf_env__insert_bpf_prog_info()
47 } else if (prog_id > node->info_linear->info.id) { in __perf_env__insert_bpf_prog_info()
48 p = &(*p)->rb_right; in __perf_env__insert_bpf_prog_info()
55 rb_link_node(&info_node->rb_node, parent, p); in __perf_env__insert_bpf_prog_info()
56 rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos); in __perf_env__insert_bpf_prog_info()
57 env->bpf_progs.infos_cnt++; in __perf_env__insert_bpf_prog_info()
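The fragment above is the kernel's standard rbtree insertion idiom: walk down from the root while remembering the link that will point at the new node, reject duplicates, then splice with rb_link_node() and rebalance with rb_insert_color(). A minimal sketch of the same pattern, with an illustrative node type rather than perf's bpf_prog_info_node:

    #include <linux/rbtree.h>
    #include <stdbool.h>

    struct demo_node {
        struct rb_node rb_node;
        unsigned int key;
    };

    /* Keyed insert mirroring __perf_env__insert_bpf_prog_info(). */
    static bool demo_insert(struct rb_root *root, struct demo_node *new)
    {
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*p != NULL) {
            struct demo_node *node;

            parent = *p;
            node = rb_entry(parent, struct demo_node, rb_node);
            if (new->key < node->key)
                p = &(*p)->rb_left;
            else if (new->key > node->key)
                p = &(*p)->rb_right;
            else
                return false;   /* duplicate key: refuse, as env.c does */
        }

        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
        return true;
    }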
66 down_read(&env->bpf_progs.lock); in perf_env__find_bpf_prog_info()
67 n = env->bpf_progs.infos.rb_node; in perf_env__find_bpf_prog_info()
71 if (prog_id < node->info_linear->info.id) in perf_env__find_bpf_prog_info()
72 n = n->rb_left; in perf_env__find_bpf_prog_info()
73 else if (prog_id > node->info_linear->info.id) in perf_env__find_bpf_prog_info()
74 n = n->rb_right; in perf_env__find_bpf_prog_info()
81 up_read(&env->bpf_progs.lock); in perf_env__find_bpf_prog_info()
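Lookup is the read side of the same tree: a plain binary-search walk done under down_read(), since readers never modify links. A sketch, reusing struct demo_node from above:

    /* Walk mirroring perf_env__find_bpf_prog_info(); NULL if absent.
     * The caller holds the tree's reader lock, as env.c does.
     */
    static struct demo_node *demo_find(struct rb_root *root, unsigned int key)
    {
        struct rb_node *n = root->rb_node;

        while (n) {
            struct demo_node *node = rb_entry(n, struct demo_node, rb_node);

            if (key < node->key)
                n = n->rb_left;
            else if (key > node->key)
                n = n->rb_right;
            else
                return node;
        }
        return NULL;
    }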
89 down_write(&env->bpf_progs.lock); in perf_env__insert_btf()
91 up_write(&env->bpf_progs.lock); in perf_env__insert_btf()
98 __u32 btf_id = btf_node->id; in __perf_env__insert_btf()
102 p = &env->bpf_progs.btfs.rb_node; in __perf_env__insert_btf()
107 if (btf_id < node->id) { in __perf_env__insert_btf()
108 p = &(*p)->rb_left; in __perf_env__insert_btf()
109 } else if (btf_id > node->id) { in __perf_env__insert_btf()
110 p = &(*p)->rb_right; in __perf_env__insert_btf()
117 rb_link_node(&btf_node->rb_node, parent, p); in __perf_env__insert_btf()
118 rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs); in __perf_env__insert_btf()
119 env->bpf_progs.btfs_cnt++; in __perf_env__insert_btf()
127 down_read(&env->bpf_progs.lock); in perf_env__find_btf()
129 up_read(&env->bpf_progs.lock); in perf_env__find_btf()
138 n = env->bpf_progs.btfs.rb_node; in __perf_env__find_btf()
142 if (btf_id < node->id) in __perf_env__find_btf()
143 n = n->rb_left; in __perf_env__find_btf()
144 else if (btf_id > node->id) in __perf_env__find_btf()
145 n = n->rb_right; in __perf_env__find_btf()
158 down_write(&env->bpf_progs.lock); in perf_env__purge_bpf()
160 root = &env->bpf_progs.infos; in perf_env__purge_bpf()
167 next = rb_next(&node->rb_node); in perf_env__purge_bpf()
168 rb_erase(&node->rb_node, root); in perf_env__purge_bpf()
169 zfree(&node->info_linear); in perf_env__purge_bpf()
173 env->bpf_progs.infos_cnt = 0; in perf_env__purge_bpf()
175 root = &env->bpf_progs.btfs; in perf_env__purge_bpf()
182 next = rb_next(&node->rb_node); in perf_env__purge_bpf()
183 rb_erase(&node->rb_node, root); in perf_env__purge_bpf()
187 env->bpf_progs.btfs_cnt = 0; in perf_env__purge_bpf()
189 up_write(&env->bpf_progs.lock); in perf_env__purge_bpf()
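perf_env__purge_bpf() drains both trees with the erase-while-iterating idiom: take rb_next() of the current node before rb_erase() unlinks it, since the successor can only be computed while the node is still in the tree. A sketch, again with the illustrative demo_node:

    #include <stdlib.h>

    /* Drain an rbtree and free every node, successor-first. */
    static void demo_purge(struct rb_root *root)
    {
        struct rb_node *pos = rb_first(root);

        while (pos) {
            struct demo_node *node = rb_entry(pos, struct demo_node, rb_node);

            pos = rb_next(pos);          /* grab successor before erase */
            rb_erase(&node->rb_node, root);
            free(node);
        }
        *root = RB_ROOT;
    }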
203 zfree(&env->hostname); in perf_env__exit()
204 zfree(&env->os_release); in perf_env__exit()
205 zfree(&env->version); in perf_env__exit()
206 zfree(&env->arch); in perf_env__exit()
207 zfree(&env->cpu_desc); in perf_env__exit()
208 zfree(&env->cpuid); in perf_env__exit()
209 zfree(&env->cmdline); in perf_env__exit()
210 zfree(&env->cmdline_argv); in perf_env__exit()
211 zfree(&env->sibling_dies); in perf_env__exit()
212 zfree(&env->sibling_cores); in perf_env__exit()
213 zfree(&env->sibling_threads); in perf_env__exit()
214 zfree(&env->pmu_mappings); in perf_env__exit()
215 zfree(&env->cpu); in perf_env__exit()
216 for (i = 0; i < env->nr_cpu_pmu_caps; i++) in perf_env__exit()
217 zfree(&env->cpu_pmu_caps[i]); in perf_env__exit()
218 zfree(&env->cpu_pmu_caps); in perf_env__exit()
219 zfree(&env->numa_map); in perf_env__exit()
221 for (i = 0; i < env->nr_numa_nodes; i++) in perf_env__exit()
222 perf_cpu_map__put(env->numa_nodes[i].map); in perf_env__exit()
223 zfree(&env->numa_nodes); in perf_env__exit()
225 for (i = 0; i < env->caches_cnt; i++) in perf_env__exit()
226 cpu_cache_level__free(&env->caches[i]); in perf_env__exit()
227 zfree(&env->caches); in perf_env__exit()
229 for (i = 0; i < env->nr_memory_nodes; i++) in perf_env__exit()
230 zfree(&env->memory_nodes[i].set); in perf_env__exit()
231 zfree(&env->memory_nodes); in perf_env__exit()
233 for (i = 0; i < env->nr_hybrid_nodes; i++) { in perf_env__exit()
234 zfree(&env->hybrid_nodes[i].pmu_name); in perf_env__exit()
235 zfree(&env->hybrid_nodes[i].cpus); in perf_env__exit()
237 zfree(&env->hybrid_nodes); in perf_env__exit()
239 for (i = 0; i < env->nr_pmus_with_caps; i++) { in perf_env__exit()
240 for (j = 0; j < env->pmu_caps[i].nr_caps; j++) in perf_env__exit()
241 zfree(&env->pmu_caps[i].caps[j]); in perf_env__exit()
242 zfree(&env->pmu_caps[i].caps); in perf_env__exit()
243 zfree(&env->pmu_caps[i].pmu_name); in perf_env__exit()
245 zfree(&env->pmu_caps); in perf_env__exit()
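Almost every release in perf_env__exit() goes through zfree(), the tools-tree helper (declared in tools/include/linux/zalloc.h) that frees the pointee and stores NULL back through the pointer, so stale fields can never dangle and a second perf_env__exit() is harmless. Its effect is roughly:

    #include <stdlib.h>

    /* Sketch of the zfree() idiom: free *pptr and clear it in one
     * step. The real helper is a function plus a cast macro, not
     * this macro.
     */
    #define ZFREE_SKETCH(pptr) do { \
        free(*(pptr));              \
        *(pptr) = NULL;             \
    } while (0)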
251 env->bpf_progs.infos = RB_ROOT; in perf_env__init()
252 env->bpf_progs.btfs = RB_ROOT; in perf_env__init()
253 init_rwsem(&env->bpf_progs.lock); in perf_env__init()
255 env->kernel_is_64_bit = -1; in perf_env__init()
266 env->kernel_is_64_bit = 1; in perf_env__init_kernel_mode()
268 env->kernel_is_64_bit = 0; in perf_env__init_kernel_mode()
273 if (env->kernel_is_64_bit == -1) in perf_env__kernel_is_64_bit()
276 return env->kernel_is_64_bit; in perf_env__kernel_is_64_bit()
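perf_env__init_kernel_mode() computes the answer once and caches it in env->kernel_is_64_bit; -1 is the "not yet computed" sentinel tested above, so perf_env__kernel_is_64_bit() lazily triggers the detection on first use. The elided classification logic boils down to inspecting the machine/arch string; the heuristic below is an illustrative assumption, not a copy of env.c:

    #include <string.h>

    /* Assumed sketch: most 64-bit machine strings carry "64"
     * (x86_64, aarch64, riscv64, ...); s390x is a notable exception.
     */
    static int arch_is_64_bit(const char *machine)
    {
        return strstr(machine, "64") != NULL || !strcmp(machine, "s390x");
    }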
284 env->cmdline_argv = calloc(argc, sizeof(char *)); in perf_env__set_cmdline()
285 if (env->cmdline_argv == NULL) in perf_env__set_cmdline()
293 env->cmdline_argv[i] = argv[i]; in perf_env__set_cmdline()
294 if (env->cmdline_argv[i] == NULL) in perf_env__set_cmdline()
298 env->nr_cmdline = argc; in perf_env__set_cmdline()
302 zfree(&env->cmdline_argv); in perf_env__set_cmdline()
304 return -ENOMEM; in perf_env__set_cmdline()
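perf_env__set_cmdline() snapshots the argv pointer array before option parsing permutes it: calloc() the array, copy each pointer (the strings themselves stay owned by the caller), and on failure roll back with zfree() and return -ENOMEM. The same shape in isolation:

    #include <stdlib.h>
    #include <errno.h>

    /* Sketch: copy an argv pointer array; names are illustrative. */
    static int snapshot_cmdline(const char ***out, int *out_nr,
                                int argc, const char *argv[])
    {
        const char **copy = calloc(argc, sizeof(*copy));

        if (copy == NULL)
            return -ENOMEM;
        for (int i = 0; i < argc; i++)
            copy[i] = argv[i];
        *out = copy;
        *out_nr = argc;
        return 0;
    }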
311 if (env->cpu != NULL) in perf_env__read_cpu_topology_map()
314 if (env->nr_cpus_avail == 0) in perf_env__read_cpu_topology_map()
315 env->nr_cpus_avail = cpu__max_present_cpu().cpu; in perf_env__read_cpu_topology_map()
317 nr_cpus = env->nr_cpus_avail; in perf_env__read_cpu_topology_map()
318 if (nr_cpus == -1) in perf_env__read_cpu_topology_map()
319 return -EINVAL; in perf_env__read_cpu_topology_map()
321 env->cpu = calloc(nr_cpus, sizeof(env->cpu[0])); in perf_env__read_cpu_topology_map()
322 if (env->cpu == NULL) in perf_env__read_cpu_topology_map()
323 return -ENOMEM; in perf_env__read_cpu_topology_map()
328 env->cpu[idx].core_id = cpu__get_core_id(cpu); in perf_env__read_cpu_topology_map()
329 env->cpu[idx].socket_id = cpu__get_socket_id(cpu); in perf_env__read_cpu_topology_map()
330 env->cpu[idx].die_id = cpu__get_die_id(cpu); in perf_env__read_cpu_topology_map()
333 env->nr_cpus_avail = nr_cpus; in perf_env__read_cpu_topology_map()
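The map is sized from cpu__max_present_cpu() and filled per CPU with core, socket, and die ids; those cpu__get_*_id() helpers ultimately read the standard sysfs topology files. A rough standalone equivalent for one CPU's core id:

    #include <stdio.h>

    /* Sketch: read a CPU's core id straight from sysfs; -1 on error. */
    static int read_core_id(int cpu)
    {
        char path[128];
        FILE *f;
        int id = -1;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
        f = fopen(path, "r");
        if (f == NULL)
            return -1;
        if (fscanf(f, "%d", &id) != 1)
            id = -1;
        fclose(f);
        return id;
    }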
348 return -ENOENT; in perf_env__read_pmu_mappings()
350 env->nr_pmu_mappings = pmu_num; in perf_env__read_pmu_mappings()
353 return -ENOMEM; in perf_env__read_pmu_mappings()
356 if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0) in perf_env__read_pmu_mappings()
363 env->pmu_mappings = strbuf_detach(&sb, NULL); in perf_env__read_pmu_mappings()
369 return -1; in perf_env__read_pmu_mappings()
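perf_env__read_pmu_mappings() serializes every PMU into a single buffer of "type:name" records via perf's strbuf, and the elided lines presumably append a '\0' after each record: perf_env__has_pmu_mapping() further down walks the buffer with strlen() hops, which only works if the records are NUL-separated inside one allocation. The flat encoding without strbuf:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Sketch: pack "type:name" records, each with its own terminating
     * NUL, into one growable buffer. The PMU list here is made up.
     */
    static char *pack_pmu_records(void)
    {
        struct { unsigned int type; const char *name; } pmus[] = {
            { 0, "software" }, { 4, "cpu" },
        };
        size_t len = 0;
        char *buf = NULL;

        for (size_t i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++) {
            char rec[64];
            int r = snprintf(rec, sizeof(rec), "%u:%s",
                             pmus[i].type, pmus[i].name);
            char *tmp;

            if (r < 0 || (size_t)r >= sizeof(rec))
                continue;                /* skip oversized entry */
            tmp = realloc(buf, len + r + 1);
            if (tmp == NULL) {
                free(buf);
                return NULL;
            }
            buf = tmp;
            memcpy(buf + len, rec, r + 1);   /* keep the NUL */
            len += r + 1;
        }
        return buf;
    }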
380 free(env->cpuid); in perf_env__read_cpuid()
381 env->cpuid = strdup(cpuid); in perf_env__read_cpuid()
382 if (env->cpuid == NULL) in perf_env__read_cpuid()
391 if (env->arch) in perf_env__read_arch()
395 env->arch = strdup(uts.machine); in perf_env__read_arch()
397 return env->arch ? 0 : -ENOMEM; in perf_env__read_arch()
402 if (env->nr_cpus_avail == 0) in perf_env__read_nr_cpus_avail()
403 env->nr_cpus_avail = cpu__max_present_cpu().cpu; in perf_env__read_nr_cpus_avail()
405 return env->nr_cpus_avail ? 0 : -ENOENT; in perf_env__read_nr_cpus_avail()
410 return env && !perf_env__read_arch(env) ? env->arch : "unknown"; in perf_env__raw_arch()
415 return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0; in perf_env__nr_cpus_avail()
420 zfree(&cache->type); in cpu_cache_level__free()
421 zfree(&cache->map); in cpu_cache_level__free()
422 zfree(&cache->size); in cpu_cache_level__free()
461 if (!env || !env->arch) { /* Assume local operation */ in perf_env__arch()
467 arch_name = env->arch; in perf_env__arch()
475 if (env->arch_strerrno == NULL) in perf_env__arch_strerrno()
476 env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env)); in perf_env__arch_strerrno()
478 return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function"; in perf_env__arch_strerrno()
488 if (!env->cpuid) { /* Assume local operation */ in perf_env__cpuid()
494 return env->cpuid; in perf_env__cpuid()
501 if (!env->nr_pmu_mappings) { /* Assume local operation */ in perf_env__nr_pmu_mappings()
507 return env->nr_pmu_mappings; in perf_env__nr_pmu_mappings()
514 if (!env->pmu_mappings) { /* Assume local operation */ in perf_env__pmu_mappings()
520 return env->pmu_mappings; in perf_env__pmu_mappings()
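These accessors all share one lazy-init shape, flagged by the "Assume local operation" comments: if the field was not populated from a perf.data header, read it from the running system via the matching perf_env__read_*() helper, then return the cached value. The pattern in miniature, using uname(2) the way perf_env__read_arch() does:

    #include <string.h>
    #include <sys/utsname.h>

    struct demo_env { char *arch; };   /* illustrative, not perf's */

    static int demo_read_arch(struct demo_env *env)
    {
        struct utsname uts;

        if (env->arch)                  /* already cached */
            return 0;
        if (uname(&uts) < 0)
            return -1;
        env->arch = strdup(uts.machine);
        return env->arch ? 0 : -1;
    }

    static const char *demo_env_arch(struct demo_env *env)
    {
        /* Assume local operation when the header had no value. */
        return demo_read_arch(env) == 0 ? env->arch : "unknown";
    }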
525 if (!env->nr_numa_map) { in perf_env__numa_node()
529 for (i = 0; i < env->nr_numa_nodes; i++) { in perf_env__numa_node()
530 nn = &env->numa_nodes[i]; in perf_env__numa_node()
531 nr = max(nr, perf_cpu_map__max(nn->map).cpu); in perf_env__numa_node()
538 * it for missing cpus, which return node -1 in perf_env__numa_node()
540 env->numa_map = malloc(nr * sizeof(int)); in perf_env__numa_node()
541 if (!env->numa_map) in perf_env__numa_node()
542 return -1; in perf_env__numa_node()
545 env->numa_map[i] = -1; in perf_env__numa_node()
547 env->nr_numa_map = nr; in perf_env__numa_node()
549 for (i = 0; i < env->nr_numa_nodes; i++) { in perf_env__numa_node()
553 nn = &env->numa_nodes[i]; in perf_env__numa_node()
554 perf_cpu_map__for_each_cpu(tmp, j, nn->map) in perf_env__numa_node()
555 env->numa_map[tmp.cpu] = i; in perf_env__numa_node()
559 return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1; in perf_env__numa_node()
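perf_env__numa_node() flattens the per-node CPU maps into one array indexed by CPU number: size it to the highest CPU seen in any node, default every slot to -1 (the value returned for CPUs belonging to no node), then stamp each node's index over its member CPUs. After that one-time build, every lookup is a bounds check plus an array read. The same construction with plain arrays:

    #include <stdlib.h>

    struct demo_numa_node { int nr_cpus; int *cpus; };  /* illustrative */

    /* Sketch: build a cpu -> node-index table; NULL on allocation
     * failure, *out_len receives the table length.
     */
    static int *build_numa_map(struct demo_numa_node *nodes, int nr_nodes,
                               int *out_len)
    {
        int nr = 0, *map;

        for (int i = 0; i < nr_nodes; i++)
            for (int j = 0; j < nodes[i].nr_cpus; j++)
                if (nodes[i].cpus[j] + 1 > nr)
                    nr = nodes[i].cpus[j] + 1;

        map = malloc(nr * sizeof(int));
        if (map == NULL)
            return NULL;
        for (int i = 0; i < nr; i++)
            map[i] = -1;                 /* CPU in no node */
        for (int i = 0; i < nr_nodes; i++)
            for (int j = 0; j < nodes[i].nr_cpus; j++)
                map[nodes[i].cpus[j]] = i;

        *out_len = nr;
        return map;
    }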
564 char *pmu_mapping = env->pmu_mappings, *colon; in perf_env__has_pmu_mapping()
566 for (int i = 0; i < env->nr_pmu_mappings; ++i) { in perf_env__has_pmu_mapping()
581 const char *cap) in perf_env__find_pmu_cap() argument
588 if (!pmu_name || !cap) in perf_env__find_pmu_cap()
591 cap_size = strlen(cap); in perf_env__find_pmu_cap()
596 memcpy(cap_eq, cap, cap_size); in perf_env__find_pmu_cap()
600 for (i = 0; i < env->nr_cpu_pmu_caps; i++) { in perf_env__find_pmu_cap()
601 if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) { in perf_env__find_pmu_cap()
603 return &env->cpu_pmu_caps[i][cap_size + 1]; in perf_env__find_pmu_cap()
609 for (i = 0; i < env->nr_pmus_with_caps; i++) { in perf_env__find_pmu_cap()
610 if (strcmp(env->pmu_caps[i].pmu_name, pmu_name)) in perf_env__find_pmu_cap()
613 ptr = env->pmu_caps[i].caps; in perf_env__find_pmu_cap()
615 for (j = 0; j < env->pmu_caps[i].nr_caps; j++) { in perf_env__find_pmu_cap()
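perf_env__find_pmu_cap() turns the requested capability into a "cap=" key (the elided lines allocate cap_size + 2 bytes and write '=' at cap_eq[cap_size]) and prefix-matches the stored "name=value" strings over cap_size + 1 characters, so the '=' is part of the match and "br" cannot hit "branches=..."; the returned pointer is the value just past the '='. A standalone version of the matching step:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch: locate "cap=value" in an array of capability strings
     * and return the value part, or NULL if absent.
     */
    static const char *find_cap(char **caps, int nr_caps, const char *cap)
    {
        size_t cap_size = strlen(cap);
        char *cap_eq = calloc(1, cap_size + 2);
        const char *ret = NULL;

        if (cap_eq == NULL)
            return NULL;
        memcpy(cap_eq, cap, cap_size);
        cap_eq[cap_size] = '=';

        for (int i = 0; i < nr_caps; i++) {
            if (!strncmp(caps[i], cap_eq, cap_size + 1)) {
                ret = &caps[i][cap_size + 1];   /* value after '=' */
                break;
            }
        }
        free(cap_eq);
        return ret;
    }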
633 *nr = env->cpu_pmu_caps ? env->br_cntr_nr : in perf_env__find_br_cntr_info()
634 env->pmu_caps->br_cntr_nr; in perf_env__find_br_cntr_info()
638 *width = env->cpu_pmu_caps ? env->br_cntr_width : in perf_env__find_br_cntr_info()
639 env->pmu_caps->br_cntr_width; in perf_env__find_br_cntr_info()
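The closing fragment selects branch-counter metadata from whichever store the perf.data header populated: the legacy flat cpu_pmu_caps fields or the first entry of the newer per-PMU pmu_caps array. Assuming the out-parameter signature suggested by the fragment (nr and width filled through pointers, presumably only when non-NULL), a call site would look like:

    #include <stdio.h>
    #include "env.h"   /* struct perf_env; path assumed */

    static void show_br_cntr(struct perf_env *env)
    {
        unsigned int nr = 0, width = 0;

        perf_env__find_br_cntr_info(env, &nr, &width);
        printf("branch counters: %u, each %u bits wide\n", nr, width);
    }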