// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <internal/cpumap.h>

static struct perf_cpu max_cpu_num;
static struct perf_cpu max_present_cpu_num;
static int max_node_num;
/**
 * The numa node X as read from /sys/devices/system/node/nodeX indexed by the
 * CPU number.
 */
static int *cpunode_map;
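
/*
 * A perf_record_cpu_map_data mask encodes CPUs as set bits, stored either in
 * 32-bit or in 64-bit words; mask32_data.long_size (4 or 8) selects which
 * layout is in use.
 */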
bool perf_record_cpu_map_data__test_bit(int i,
					const struct perf_record_cpu_map_data *data)
{
	int bit_word32 = i / 32;
	__u32 bit_mask32 = 1U << (i & 31);
	int bit_word64 = i / 64;
	__u64 bit_mask64 = ((__u64)1) << (i & 63);

	return (data->mask32_data.long_size == 4)
		? (bit_word32 < data->mask32_data.nr) &&
		  (data->mask32_data.mask[bit_word32] & bit_mask32) != 0
		: (bit_word64 < data->mask64_data.nr) &&
		  (data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
}

/* Read ith mask value from data into the given 64-bit sized bitmap */
static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
						    int i, unsigned long *bitmap)
{
#if __SIZEOF_LONG__ == 8
	if (data->mask32_data.long_size == 4)
		bitmap[0] = data->mask32_data.mask[i];
	else
		bitmap[0] = data->mask64_data.mask[i];
#else
	if (data->mask32_data.long_size == 4) {
		bitmap[0] = data->mask32_data.mask[i];
		bitmap[1] = 0;
	} else {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
		bitmap[1] = (unsigned long)data->mask64_data.mask[i];
#else
		bitmap[0] = (unsigned long)data->mask64_data.mask[i];
		bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
#endif
	}
#endif
}
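
/*
 * Build a perf_cpu_map from an explicit list of CPU numbers carried in the
 * event's cpus_data section.
 */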
static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(data->cpus_data.nr);
	if (map) {
		unsigned int i;

		for (i = 0; i < data->cpus_data.nr; i++) {
			/*
			 * Special treatment for -1, which is not a real cpu
			 * number, and we need to use (int) -1 to initialize
			 * map[i], otherwise it would become 65535.
			 */
			if (data->cpus_data.cpu[i] == (u16) -1)
				RC_CHK_ACCESS(map)->map[i].cpu = -1;
			else
				RC_CHK_ACCESS(map)->map[i].cpu = (int) data->cpus_data.cpu[i];
		}
	}

	return map;
}

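/*
 * Build a perf_cpu_map from a bitmask: a first pass counts the set bits to
 * size the map, a second pass converts each set bit back into a CPU number.
 */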
static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
	DECLARE_BITMAP(local_copy, 64);
	int weight = 0, mask_nr = data->mask32_data.nr;
	struct perf_cpu_map *map;

	for (int i = 0; i < mask_nr; i++) {
		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		weight += bitmap_weight(local_copy, 64);
	}

	map = perf_cpu_map__empty_new(weight);
	if (!map)
		return NULL;

	for (int i = 0, j = 0; i < mask_nr; i++) {
		int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
		int cpu;

		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		for_each_set_bit(cpu, local_copy, 64)
			RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
	}
	return map;
}

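/*
 * Build a perf_cpu_map from an inclusive [start_cpu, end_cpu] range; when
 * any_cpu is set, an extra leading entry holds the "any CPU" value -1.
 */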
static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;
	unsigned int i = 0;

	map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
				data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
	if (!map)
		return NULL;

	if (data->range_cpu_data.any_cpu)
		RC_CHK_ACCESS(map)->map[i++].cpu = -1;

	for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
	     i++, cpu++)
		RC_CHK_ACCESS(map)->map[i].cpu = cpu;

	return map;
}

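/*
 * Construct a perf_cpu_map from the payload of a PERF_RECORD_CPU_MAP event,
 * dispatching on the encoding type chosen by the tool that wrote it.
 */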
struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		return cpu_map__from_entries(data);
	case PERF_CPU_MAP__MASK:
		return cpu_map__from_mask(data);
	case PERF_CPU_MAP__RANGE_CPUS:
		return cpu_map__from_range(data);
	default:
		pr_err("cpu_map__new_data unknown type %d\n", data->type);
		return NULL;
	}
}

size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

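/* Allocate a map of nr entries, each initialized to the "empty" CPU, -1. */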
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr);

	if (cpus != NULL) {
		for (int i = 0; i < nr; i++)
			RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
	}

	return cpus;
}

struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = aggr_cpu_id__empty();
	}

	return cpus;
}

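/*
 * Read an integer topology attribute for a CPU from sysfs, e.g.
 * devices/system/cpu/cpu0/topology/core_id.
 */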
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu__get_socket_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.socket = cpu__get_socket_id(cpu);
	return id;
}

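/*
 * Compare aggregation ids field by field; the order here (node, socket, die,
 * cluster, cache level, cache, core, thread) defines the sort precedence.
 */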
static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->cluster != b->cluster)
		return a->cluster - b->cluster;
	else if (a->cache_lvl != b->cache_lvl)
		return a->cache_lvl - b->cache_lvl;
	else if (a->cache != b->cache)
		return a->cache - b->cache;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread_idx - b->thread_idx;
}

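/*
 * Build the set of distinct aggregation ids for cpus: get_id() maps each CPU
 * to an id, duplicates are dropped, the allocation is trimmed to the ids
 * actually kept, and the result is optionally sorted with aggr_cpu_id__cmp().
 */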
struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
				       aggr_cpu_id_get_t get_id,
				       void *data, bool needs_sort)
{
	int idx;
	struct perf_cpu cpu;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));

	if (!c)
		return NULL;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		bool duplicate = false;
		struct aggr_cpu_id cpu_id = get_id(cpu, data);

		for (int j = 0; j < c->nr; j++) {
			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			c->map[c->nr] = cpu_id;
			c->nr++;
		}
	}
	/* Trim. */
	if (c->nr != perf_cpu_map__nr(cpus)) {
		struct cpu_aggr_map *trimmed_c =
			realloc(c,
				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);

		if (trimmed_c)
			c = trimmed_c;
	}

	/* ensure we process id in increasing order */
	if (needs_sort)
		qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);

	return c;
}

int cpu__get_die_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int die;

	die = cpu__get_die_id(cpu);
	/* There is no die_id on legacy systems. */
	if (die == -1)
		die = 0;

	/*
	 * die_id is relative to socket, so start
	 * with the socket ID and then add die to
	 * make a unique ID.
	 */
	id = aggr_cpu_id__socket(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.die = die;
	return id;
}

int cpu__get_cluster_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
{
	int cluster = cpu__get_cluster_id(cpu);
	struct aggr_cpu_id id;

	/* There is no cluster_id on legacy systems. */
	if (cluster == -1)
		cluster = 0;

	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cluster = cluster;
	return id;
}

int cpu__get_core_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int core = cpu__get_core_id(cpu);

	/* aggr_cpu_id__cluster returns a struct with socket, die and cluster set. */
	id = aggr_cpu_id__cluster(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	/*
	 * core_id is relative to socket and die, we need a global id.
	 * So we combine the result from aggr_cpu_id__cluster with the core id.
	 */
	id.core = core;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;

	/* aggr_cpu_id__core returns a struct with socket, die, cluster and core set. */
	id = aggr_cpu_id__core(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cpu = cpu;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = cpu__get_node(cpu);
	return id;
}

struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	/* it always aggregates to cpu 0 */
	cpu.cpu = 0;
	id.cpu = cpu;
	return id;
}

/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert the highest 0-based index into a count */
	(*max)++;

out:
	free(buf);
	return err;
}
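
/*
 * For instance, with a "possible" file containing "0-63", get_max_num()
 * scans back to the '-', parses 63 and returns *max == 64, i.e. the number
 * of possible CPUs rather than the highest index.
 */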

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num.cpu = 4096;
	max_present_cpu_num.cpu = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num.cpu);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num.cpu);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

struct perf_cpu cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num.cpu))
		set_max_cpu_num();

	return max_cpu_num;
}

struct perf_cpu cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num.cpu))
		set_max_cpu_num();

	return max_present_cpu_num;
}

int cpu__get_node(struct perf_cpu cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu.cpu];
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num.cpu; i++)
		cpunode_map[i] = -1;

	return 0;
}

int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

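/*
 * Format a cpu map as a comma-separated list, collapsing consecutive CPUs
 * into ranges; e.g. a map holding 0,1,2,3 and 8 prints as "0-3,8".
 */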
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
		struct perf_cpu cpu = { .cpu = INT_MAX };
		bool last = i == perf_cpu_map__nr(map);

		if (!last)
			cpu = perf_cpu_map__cpu(map, i);

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, i).cpu);
			}
		} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}

static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

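/*
 * Format a cpu map as a hex bitmask, most significant nibble first, with a
 * ',' separator every 32 bits; e.g. a map holding CPUs 0-3 on a 4-CPU
 * system prints as "f".
 */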
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int idx;
	char *ptr = buf;
	unsigned char *bitmap;
	struct perf_cpu c, last_cpu = perf_cpu_map__max(map);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu.cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	perf_cpu_map__for_each_cpu(c, idx, map)
		bitmap[c.cpu / 8] |= 1 << (c.cpu % 8);

	for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static struct perf_cpu_map *online;

	if (!online)
		online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */

	return online;
}

bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
	return a->thread_idx == b->thread_idx &&
		a->node == b->node &&
		a->socket == b->socket &&
		a->die == b->die &&
		a->cluster == b->cluster &&
		a->cache_lvl == b->cache_lvl &&
		a->cache == b->cache &&
		a->core == b->core &&
		a->cpu.cpu == b->cpu.cpu;
}

bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
	return a->thread_idx == -1 &&
		a->node == -1 &&
		a->socket == -1 &&
		a->die == -1 &&
		a->cluster == -1 &&
		a->cache_lvl == -1 &&
		a->cache == -1 &&
		a->core == -1 &&
		a->cpu.cpu == -1;
}

struct aggr_cpu_id aggr_cpu_id__empty(void)
{
	struct aggr_cpu_id ret = {
		.thread_idx = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.cluster = -1,
		.cache_lvl = -1,
		.cache = -1,
		.core = -1,
		.cpu = (struct perf_cpu){ .cpu = -1 },
	};
	return ret;
}