// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include <assert.h> /* for the assert() calls below; assumed not pulled in transitively */
#include "internal.h"

void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

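/*
 * Allocate a map with room for nr_cpus entries. The perf_cpu array is a
 * flexible payload placed directly behind the struct, so one malloc()
 * covers both. ADD_RC_CHK() wraps the raw struct in libperf's optional
 * reference-count checking (a plain assignment unless checking is
 * compiled in) and evaluates false if the allocation failed.
 */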
struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus;
	struct perf_cpu_map *result;

	if (nr_cpus == 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

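/*
 * A single-entry map holding -1 denotes "any CPU": the value passed as
 * the cpu argument to perf_event_open() when an event follows a thread
 * rather than being pinned to one CPU.
 */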
struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

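/*
 * Lifetime follows the usual get/put convention: perf_cpu_map__get()
 * takes an extra reference and perf_cpu_map__put() drops one, freeing
 * the map when the count hits zero. cpu_map__delete() is only reached
 * via put(), hence the warning if the refcount is not zero there.
 */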
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

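/*
 * Fallback when the sysfs online file cannot be read: build a
 * contiguous 0..N-1 map from sysconf(). _SC_NPROCESSORS_ONLN counts
 * only online CPUs, so with CPUs offline the map may not match the
 * actual online set; the warning below flags that situation.
 */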
static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			   nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

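/*
 * Preferred source for the online CPU map. The sysfs file uses the
 * kernel's list notation (e.g. "0-3,8-11\n"), parsed by
 * perf_cpu_map__read() below.
 */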
static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (onlnf) {
		cpus = perf_cpu_map__read(onlnf);
		fclose(onlnf);
	}
	return cpus;
}

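/* Try sysfs first; fall back to sysconf() if it cannot be read. */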
struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}

static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

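/*
 * Build a map from a scratch array of nr_cpus entries: copy, qsort()
 * into ascending CPU order, then compact duplicates in place. Only nr
 * is lowered when duplicates are dropped; the allocation keeps its
 * original size.
 */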
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

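/*
 * Parse a kernel-style CPU list from an open stream. Single CPUs and
 * ranges are accepted, so "0-2,7\n" yields the map {0, 1, 2, 7}.
 * Parsing stops at a newline or when no further number can be read;
 * the collected values are then sorted and deduplicated by
 * cpu_map__trim_new().
 */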
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			/* The previous number ended in '-': fill in CPUs prev + 1 .. cpu - 1; cpu itself is added below. */
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
				  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu; /* remember the range start */
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

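/*
 * Build a map from a cpu_list string such as "0,2-4,7". A NULL cpu_list
 * means "all online CPUs"; an empty string yields the "any CPU" map
 * (see the comment below). A CPU that appears twice in the list is
 * treated as invalid.
 */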
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the TOPOLOGY
	 * header for NUMA nodes with no CPU (e.g. because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
			  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else
		cpus = perf_cpu_map__new_any_cpu();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

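/*
 * Return the CPU at idx, or the sentinel value -1 when cpus is NULL or
 * idx is out of range, so callers can iterate without pre-checking.
 */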
struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx >= 0 && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

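/*
 * A NULL map means "empty", while a one-entry map holding -1 means
 * "any CPU". The three predicates below differ in how they treat those
 * cases: __has_any_cpu_or_is_empty() is true for either (it only looks
 * at the first entry), __is_any_cpu_or_is_empty() requires the map to
 * contain nothing but the -1 entry, and __is_empty() is true for NULL
 * alone.
 */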
bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	if (!map)
		return true;

	return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
{
	return map == NULL;
}

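/*
 * Find the index of a CPU by binary search; maps are built sorted (see
 * cpu_map__trim_new()), which this relies on. Returns -1 if the CPU is
 * not in the map.
 */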
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
{
	struct perf_cpu cpu, result = {
		.cpu = -1
	};
	int idx;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
		result = cpu;
		break;
	}
	return result;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// The map is sorted: cpu_map__trim_new() qsort()s it, and cpu_map__new_sysconf() fills it in ascending order.
	return __perf_cpu_map__nr(map) > 0
	       ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
	       : result;
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

/*
 * Merge two cpumaps.
 *
 * 'orig' either gets freed and replaced with a new map, or reused
 * with no reference count change (similar to "realloc").
 * 'other' has its reference count increased.
 */

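/*
 * Illustrative example (values are not from a real system): merging
 * other = {1, 4} into orig = {0, 1} drops the reference to orig and
 * returns the new map {0, 1, 4}; merging other = {0, 1} into
 * orig = {0, 1, 4} returns orig unchanged, since orig is already a
 * superset.
 */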
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}

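/*
 * Intersect two cpumaps. Unlike merge, neither argument is consumed:
 * the caller keeps its references to 'orig' and 'other', and receives
 * either a newly counted reference or a fresh map. An empty
 * intersection yields NULL; e.g. intersecting {0, 1, 4} with {1, 2}
 * (illustrative values) yields {1}.
 */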
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}