// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "record.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/pmu.h"
#include "util/sample.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include "util/env.h"
#include "util/intel-tpebs.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/timerfd.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
	evlist->nr_br_cntr = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();
	bool can_profile_kernel;
	int err;

	if (!evlist)
		return NULL;

	can_profile_kernel = perf_event_paranoid_check(1);
	err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
	if (err) {
		evlist__delete(evlist);
		return NULL;
	}

	if (evlist->core.nr_entries > 1) {
		struct evsel *evsel;

		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
	}

	return evlist;
}
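
/*
 * A minimal lifecycle sketch, using only functions from this file
 * (error handling mostly elided):
 *
 *	struct evlist *evlist = evlist__new_default();
 *
 *	if (evlist != NULL && evlist__open(evlist) == 0) {
 *		evlist__enable(evlist);
 *		... consume events ...
 *		evlist__disable(evlist);
 *	}
 *	evlist__delete(evlist);
 */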

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	event_enable_timer__exit(&evlist->eet);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	tpebs_delete();
	evlist__free_stats(evlist);
	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	perf_evlist__add(&evlist->core, &entry->core);
	entry->evlist = evlist;
	entry->tracking = !entry->core.idx;

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

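/*
 * Move every evsel from @list onto the tail of @evlist.  Each pass takes
 * the first remaining evsel as a leader and then moves over every other
 * evsel that has it as its group leader, so group members end up
 * adjacent to their leader in the evlist.
 */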
void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel__has_leader(evsel, leader)) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// If the event is not in this evlist, just ignore the handler.
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

static void evlist__set_leader(struct evlist *evlist)
{
	perf_evlist__set_leader(&evlist->core);
}

static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
		/* Avoid frequency mode for dummy events to avoid associated timers. */
		.freq = 0,
		.sample_period = 1,
	};

	return evsel__new_idx(&attr, evlist->core.nr_entries);
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (!evsel)
		return NULL;

	evsel->core.attr.exclude_kernel = 1;
	evsel->core.attr.exclude_guest = 1;
	evsel->core.attr.exclude_hv = 1;
	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;
	evsel->name = strdup("dummy:u");

	evlist__add(evlist, evsel);
	return evsel;
}

#ifdef HAVE_LIBTRACEEVENT
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0,
					       /*format=*/true);

	if (IS_ERR(evsel))
		return evsel;

	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TIME);

	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;

	evlist__add(evlist, evsel);
	return evsel;
}
#endif

int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
					  struct perf_event_attr *attrs,
					  size_t nr_attrs)
{
	if (!nr_attrs)
		return 0;

	return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
}

struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

#ifdef HAVE_LIBTRACEEVENT
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}
#endif

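/*
 * The evlist CPU iterator walks the evlist in CPU-major order: for each
 * CPU in evlist->core.all_cpus it visits every evsel whose cpu map
 * contains that CPU.  When an @affinity is supplied, the iterator pins
 * the current thread to each CPU as it is visited, so that the per-CPU
 * ioctls/syscalls issued from the loop body stay local to that CPU.
 */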
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr = {
		.container = evlist,
		.evsel = NULL,
		.cpu_map_idx = 0,
		.evlist_cpu_map_idx = 0,
		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
		.cpu = (struct perf_cpu){ .cpu = -1},
		.affinity = affinity,
	};

	if (evlist__empty(evlist)) {
		/* Ensure the empty list doesn't iterate. */
		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
	} else {
		itr.evsel = evlist__first(evlist);
		if (itr.affinity) {
			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
			affinity__set(itr.affinity, itr.cpu.cpu);
			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
			/*
			 * If this CPU isn't in the evsel's cpu map then advance
			 * through the list.
			 */
			if (itr.cpu_map_idx == -1)
				evlist_cpu_iterator__next(&itr);
		}
	}
	return itr;
}

void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		if (evlist_cpu_itr->cpu_map_idx != -1)
			return;
	}
	evlist_cpu_itr->evlist_cpu_map_idx++;
	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
		evlist_cpu_itr->cpu =
			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
					  evlist_cpu_itr->evlist_cpu_map_idx);
		if (evlist_cpu_itr->affinity)
			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		/*
		 * If this CPU isn't in the evsel's cpu map then advance through
		 * the list.
		 */
		if (evlist_cpu_itr->cpu_map_idx == -1)
			evlist_cpu_iterator__next(evlist_cpu_itr);
	}
}

bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
	return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
}

static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
	if (!evsel_name)
		return 0;
	if (evsel__is_dummy_event(pos))
		return 1;
	return !evsel__name_is(pos, evsel_name);
}

static int evlist__is_enabled(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		/* If at least one event is enabled, evlist is enabled. */
		if (!pos->disabled)
			return true;
	}
	return false;
}

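/*
 * Disable events, optionally restricted to those matching @evsel_name
 * and optionally excluding dummy events.  Events marked 'immediate' are
 * disabled in a second pass, after all other events.
 */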
static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	bool has_imm = false;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	/* Disable 'immediate' events last */
	for (int imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
			pos = evlist_cpu_itr.evsel;
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			if (excl_dummy && evsel__is_dummy_event(pos))
				continue;
			if (pos->immediate)
				has_imm = true;
			if (pos->immediate != imm)
				continue;
			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only a single event, we need to check
	 * the enabled state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}

void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, false);
}

void evlist__disable_non_dummy(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, true);
}

void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name, false);
}

static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
		pos = evlist_cpu_itr.evsel;
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
	}
	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = false;
	}

	/*
	 * Enabling even a single event sets 'enabled' for the evlist,
	 * so that the toggle can work properly and switch back to the
	 * 'disabled' state.
	 */
	evlist->enabled = true;
}

void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, false);
}

void evlist__enable_non_dummy(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, true);
}

void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name, false);
}

void evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable |
				       fdarray_flag__non_perf_event);
}
#endif

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

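/*
 * Map a sample id back to its perf_sample_id entry via the evlist's
 * id hash table.
 */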
struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

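/*
 * Extract the sample id from an event: for PERF_RECORD_SAMPLE the id
 * sits at a fixed position (id_pos) from the start of the sample array;
 * for other record types, sample_id_all places it at a fixed position
 * (is_pos) from the end.
 */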
static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_evsel *_evsel,
			 struct perf_mmap_param *_mp,
			 int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
	struct evsel *evsel = container_of(_evsel, struct evsel, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, struct perf_cpu cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

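/*
 * Derive a default mmap size, in pages, from the perf_event_mlock_kb
 * sysctl: the mlock budget minus one page (for the ring buffer's control
 * page), rounded down to a power of two.  Falls back to 512 kB if the
 * sysctl can't be read.
 */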
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

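/*
 * Total mmap length in bytes: @pages data pages (must be a power of
 * two) plus one extra page for the ring buffer's control/header page.
 * UINT_MAX means "use the perf_event_mlock_kb default".
 */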
size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got a file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got a page count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
			 int comp_level)
{
	/*
	 * mp.prot is set late, just before perf_mmap__mmap() is called,
	 * because its value depends on the evsel's write_backward setting.
	 * So &mp must not be passed via a const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are passed to perf record,
	 * '-a' overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is passed to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false for
	 * perf record, and thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That keeps perf record's
	 * current behavior.
	 *
	 * perf stat, however, allows target->per_thread and
	 * target->system_wide to both be true, meaning system-wide
	 * per-thread data collection. In that case thread_map__new_str
	 * calls thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target) && !evlist__has_bpf_output(evlist))
		cpus = perf_cpu_map__new_any_cpu();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
			  struct target *target)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		/*
		 * Filters only work for tracepoint events, which don't have
		 * a CPU limit, so the evlist and evsel CPU maps should
		 * always match.
		 */
		if (evsel->filter) {
			err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
			if (err) {
				*err_evsel = evsel;
				break;
			}
		}

		/*
		 * non-tracepoint events can have BPF filters.
		 */
		if (!list_empty(&evsel->bpf_filters)) {
			err = perf_bpf_filter__prepare(evsel, target);
			if (err) {
				*err_evsel = evsel;
				break;
			}
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

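/*
 * Build a tracepoint filter string excluding all of @pids, e.g. for
 * npids == 2: "common_pid != 1234 && common_pid != 5678".  Returns a
 * malloc'ed string that the caller must free, or NULL on error.
 */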
char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

static struct evsel *
evlist__find_dup_event_from_prev(struct evlist *evlist, struct evsel *event)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (event == pos)
			break;
		if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
		    !strcmp(pos->name, event->name))
			return pos;
	}
	return NULL;
}

#define MAX_NR_ABBR_NAME	(26 * 11)

/*
 * The abbreviated names run from A to Z9. If the number of events
 * requiring a branch counter exceeds MAX_NR_ABBR_NAME, return NA.
 */
static void evlist__new_abbr_name(char *name)
{
	static int idx;
	int i = idx / 26;

	if (idx >= MAX_NR_ABBR_NAME) {
		name[0] = 'N';
		name[1] = 'A';
		name[2] = '\0';
		return;
	}

	name[0] = 'A' + (idx % 26);

	if (!i)
		name[1] = '\0';
	else {
		name[1] = '0' + i - 1;
		name[2] = '\0';
	}

	idx++;
}

void evlist__update_br_cntr(struct evlist *evlist)
{
	struct evsel *evsel, *dup;
	int i = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) {
			evsel->br_cntr_idx = i++;
			evsel__leader(evsel)->br_cntr_nr++;

			dup = evlist__find_dup_event_from_prev(evlist, evsel);
			if (dup)
				memcpy(evsel->abbr_name, dup->abbr_name, 3 * sizeof(char));
			else
				evlist__new_abbr_name(evsel->abbr_name);
		}
	}
	evlist->nr_br_cntr = i;
}

bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}

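/*
 * Close all open event file descriptors.  When a real CPU map is in use,
 * close them grouped by CPU with the thread affinity set to each CPU in
 * turn (to keep the work local to that CPU), then free the per-evsel fd
 * and id arrays.
 */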
void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity affinity;

	/*
	 * With perf record core.user_requested_cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.user_requested_cpus ||
	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
				      evlist_cpu_itr.cpu_map_idx);
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}

static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new_online_cpus();
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

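/*
 * Fork the workload described by @argv but hold it just before execvp()
 * using a pipe (the "cork"): the child blocks reading go_pipe until the
 * parent writes one byte via evlist__start_workload(), or exits quietly
 * if the parent just closes the pipe.  A second pipe is used to wait for
 * the child to settle.  A typical sequence (sketch, error handling
 * elided):
 *
 *	evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	evlist__open(evlist);
 *	evlist__enable(evlist);
 *	evlist__start_workload(evlist);
 */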
int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process so as not to confuse
		 * --exclude-perf users who see 'perf' in the window up to
		 * the execvp() and think that perf samples are not being
		 * excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

evlist__start_workload(struct evlist * evlist)1601  int evlist__start_workload(struct evlist *evlist)
1602  {
1603  	if (evlist->workload.cork_fd > 0) {
1604  		char bf = 0;
1605  		int ret;
1606  		/*
1607  		 * Remove the cork, let it rip!
1608  		 */
1609  		ret = write(evlist->workload.cork_fd, &bf, 1);
1610  		if (ret < 0)
1611  			perror("unable to write to pipe");
1612  
1613  		close(evlist->workload.cork_fd);
1614  		return ret;
1615  	}
1616  
1617  	return 0;
1618  }
1619  
evlist__parse_sample(struct evlist * evlist,union perf_event * event,struct perf_sample * sample)1620  int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
1621  {
1622  	struct evsel *evsel = evlist__event2evsel(evlist, event);
1623  	int ret;
1624  
1625  	if (!evsel)
1626  		return -EFAULT;
1627  	ret = evsel__parse_sample(evsel, event, sample);
1628  	if (ret)
1629  		return ret;
1630  	if (perf_guest && sample->id) {
1631  		struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);
1632  
1633  		if (sid) {
1634  			sample->machine_pid = sid->machine_pid;
1635  			sample->vcpu = sid->vcpu.cpu;
1636  		}
1637  	}
1638  	return 0;
1639  }
1640  
evlist__parse_sample_timestamp(struct evlist * evlist,union perf_event * event,u64 * timestamp)1641  int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
1642  {
1643  	struct evsel *evsel = evlist__event2evsel(evlist, event);
1644  
1645  	if (!evsel)
1646  		return -EFAULT;
1647  	return evsel__parse_sample_timestamp(evsel, event, timestamp);
1648  }
1649  
evlist__strerror_open(struct evlist * evlist,int err,char * buf,size_t size)1650  int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
1651  {
1652  	int printed, value;
1653  	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1654  
1655  	switch (err) {
1656  	case EACCES:
1657  	case EPERM:
1658  		printed = scnprintf(buf, size,
1659  				    "Error:\t%s.\n"
1660  				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1661  
1662  		value = perf_event_paranoid();
1663  
1664  		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1665  
1666  		if (value >= 2) {
1667  			printed += scnprintf(buf + printed, size - printed,
1668  					     "For your workloads it needs to be <= 1\nHint:\t");
1669  		}
1670  		printed += scnprintf(buf + printed, size - printed,
1671  				     "For system wide tracing it needs to be set to -1.\n");
1672  
1673  		printed += scnprintf(buf + printed, size - printed,
1674  				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1675  				    "Hint:\tThe current value is %d.", value);
1676  		break;
1677  	case EINVAL: {
1678  		struct evsel *first = evlist__first(evlist);
1679  		int max_freq;
1680  
1681  		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1682  			goto out_default;
1683  
1684  		if (first->core.attr.sample_freq < (u64)max_freq)
1685  			goto out_default;
1686  
1687  		printed = scnprintf(buf, size,
1688  				    "Error:\t%s.\n"
1689  				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1690  				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1691  				    emsg, max_freq, first->core.attr.sample_freq);
1692  		break;
1693  	}
1694  	default:
1695  out_default:
1696  		scnprintf(buf, size, "%s", emsg);
1697  		break;
1698  	}
1699  
1700  	return 0;
1701  }
1702  
evlist__strerror_mmap(struct evlist * evlist,int err,char * buf,size_t size)1703  int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
1704  {
1705  	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1706  	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
1707  
1708  	switch (err) {
1709  	case EPERM:
1710  		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1711  		printed += scnprintf(buf + printed, size - printed,
1712  				     "Error:\t%s.\n"
1713  				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1714  				     "Hint:\tTried using %zd kB.\n",
1715  				     emsg, pages_max_per_user, pages_attempted);
1716  
1717  		if (pages_attempted >= pages_max_per_user) {
1718  			printed += scnprintf(buf + printed, size - printed,
1719  					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1720  					     pages_max_per_user + pages_attempted);
1721  		}
1722  
1723  		printed += scnprintf(buf + printed, size - printed,
1724  				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
1725  		break;
1726  	default:
1727  		scnprintf(buf, size, "%s", emsg);
1728  		break;
1729  	}
1730  
1731  	return 0;
1732  }
1733  
evlist__to_front(struct evlist * evlist,struct evsel * move_evsel)1734  void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
1735  {
1736  	struct evsel *evsel, *n;
1737  	LIST_HEAD(move);
1738  
1739  	if (move_evsel == evlist__first(evlist))
1740  		return;
1741  
1742  	evlist__for_each_entry_safe(evlist, n, evsel) {
1743  		if (evsel__leader(evsel) == evsel__leader(move_evsel))
1744  			list_move_tail(&evsel->core.node, &move);
1745  	}
1746  
1747  	list_splice(&move, &evlist->core.entries);
1748  }
1749  
evlist__get_tracking_event(struct evlist * evlist)1750  struct evsel *evlist__get_tracking_event(struct evlist *evlist)
1751  {
1752  	struct evsel *evsel;
1753  
1754  	evlist__for_each_entry(evlist, evsel) {
1755  		if (evsel->tracking)
1756  			return evsel;
1757  	}
1758  
1759  	return evlist__first(evlist);
1760  }
1761  
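/*
 * Make tracking_evsel the only event with the tracking flag set, i.e. the
 * one event that will be configured to record side-band data (mmap, comm,
 * task events and the like).
 */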
1762  void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
1763  {
1764  	struct evsel *evsel;
1765  
1766  	if (tracking_evsel->tracking)
1767  		return;
1768  
1769  	evlist__for_each_entry(evlist, evsel) {
1770  		if (evsel != tracking_evsel)
1771  			evsel->tracking = false;
1772  	}
1773  
1774  	tracking_evsel->tracking = true;
1775  }
1776  
1777  struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide)
1778  {
1779  	struct evsel *evsel;
1780  
1781  	evsel = evlist__get_tracking_event(evlist);
1782  	if (!evsel__is_dummy_event(evsel)) {
1783  		evsel = evlist__add_aux_dummy(evlist, system_wide);
1784  		if (!evsel)
1785  			return NULL;
1786  
1787  		evlist__set_tracking_event(evlist, evsel);
1788  	} else if (system_wide) {
1789  		perf_evlist__go_system_wide(&evlist->core, &evsel->core);
1790  	}
1791  
1792  	return evsel;
1793  }
1794  
1795  struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
1796  {
1797  	struct evsel *evsel;
1798  
1799  	evlist__for_each_entry(evlist, evsel) {
1800  		if (!evsel->name)
1801  			continue;
1802  		if (evsel__name_is(evsel, str))
1803  			return evsel;
1804  	}
1805  
1806  	return NULL;
1807  }
1808  
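/*
 * Legal overwrite (backward) ring buffer state transitions, as encoded in
 * the switch below:
 *
 *	BKW_MMAP_NOTREADY     -> BKW_MMAP_RUNNING
 *	BKW_MMAP_RUNNING      -> BKW_MMAP_DATA_PENDING  (pauses the mmaps)
 *	BKW_MMAP_DATA_PENDING -> BKW_MMAP_EMPTY
 *	BKW_MMAP_EMPTY        -> BKW_MMAP_RUNNING       (resumes the mmaps)
 *
 * A hypothetical reader of evlist->overwrite_mmap would therefore drain the
 * buffer like this:
 *
 *	evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
 *	// ... consume events from the overwrite mmaps ...
 *	evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 *	evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 */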
1809  void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
1810  {
1811  	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1812  	enum action {
1813  		NONE,
1814  		PAUSE,
1815  		RESUME,
1816  	} action = NONE;
1817  
1818  	if (!evlist->overwrite_mmap)
1819  		return;
1820  
1821  	switch (old_state) {
1822  	case BKW_MMAP_NOTREADY: {
1823  		if (state != BKW_MMAP_RUNNING)
1824  			goto state_err;
1825  		break;
1826  	}
1827  	case BKW_MMAP_RUNNING: {
1828  		if (state != BKW_MMAP_DATA_PENDING)
1829  			goto state_err;
1830  		action = PAUSE;
1831  		break;
1832  	}
1833  	case BKW_MMAP_DATA_PENDING: {
1834  		if (state != BKW_MMAP_EMPTY)
1835  			goto state_err;
1836  		break;
1837  	}
1838  	case BKW_MMAP_EMPTY: {
1839  		if (state != BKW_MMAP_RUNNING)
1840  			goto state_err;
1841  		action = RESUME;
1842  		break;
1843  	}
1844  	default:
1845  		WARN_ONCE(1, "Shouldn't get here\n");
1846  	}
1847  
1848  	evlist->bkw_mmap_state = state;
1849  
1850  	switch (action) {
1851  	case PAUSE:
1852  		evlist__pause(evlist);
1853  		break;
1854  	case RESUME:
1855  		evlist__resume(evlist);
1856  		break;
1857  	case NONE:
1858  	default:
1859  		break;
1860  	}
1861  
1862  state_err:
1863  	return;
1864  }
1865  
1866  bool evlist__exclude_kernel(struct evlist *evlist)
1867  {
1868  	struct evsel *evsel;
1869  
1870  	evlist__for_each_entry(evlist, evsel) {
1871  		if (!evsel->core.attr.exclude_kernel)
1872  			return false;
1873  	}
1874  
1875  	return true;
1876  }
1877  
1878  /*
1879   * Events in the data file are not collected in groups, but we still
1880   * want the group display. Set up an artificial group and set the
1881   * leader's forced_leader flag to notify the display code.
1882   */
1883  void evlist__force_leader(struct evlist *evlist)
1884  {
1885  	if (evlist__nr_groups(evlist) == 0) {
1886  		struct evsel *leader = evlist__first(evlist);
1887  
1888  		evlist__set_leader(evlist);
1889  		leader->forced_leader = true;
1890  	}
1891  }
1892  
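/*
 * Called when opening a weak group (the ':W' event modifier) failed: break
 * the group apart so its members can be reopened as individual events,
 * keeping grouped only those events that must stay in a group.
 */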
1893  struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
1894  {
1895  	struct evsel *c2, *leader;
1896  	bool is_open = true;
1897  
1898  	leader = evsel__leader(evsel);
1899  
1900  	pr_debug("Weak group for %s/%d failed\n",
1901  			leader->name, leader->core.nr_members);
1902  
1903  	/*
1904  	 * for_each_group_member doesn't work here because it doesn't
1905  	 * include the first entry.
1906  	 */
1907  	evlist__for_each_entry(evsel_list, c2) {
1908  		if (c2 == evsel)
1909  			is_open = false;
1910  		if (evsel__has_leader(c2, leader)) {
1911  			if (is_open && close)
1912  				perf_evsel__close(&c2->core);
1913  			/*
1914  			 * We want to close all members of the group and reopen
1915  			 * them. Some events, like Intel topdown, require being
1916  			 * in a group and so keep these in the group.
1917  			 */
1918  			evsel__remove_from_group(c2, leader);
1919  
1920  			/*
1921  			 * Set this for all former members of the group
1922  			 * to indicate they get reopened.
1923  			 */
1924  			c2->reset_group = true;
1925  		}
1926  	}
1927  	/* Reset the leader count if all entries were removed. */
1928  	if (leader->core.nr_members == 1)
1929  		leader->core.nr_members = 0;
1930  	return leader;
1931  }
1932  
1933  static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
1934  {
1935  	char *s, *p;
1936  	int ret = 0, fd;
1937  
1938  	if (strncmp(str, "fifo:", 5))
1939  		return -EINVAL;
1940  
1941  	str += 5;
1942  	if (!*str || *str == ',')
1943  		return -EINVAL;
1944  
1945  	s = strdup(str);
1946  	if (!s)
1947  		return -ENOMEM;
1948  
1949  	p = strchr(s, ',');
1950  	if (p)
1951  		*p = '\0';
1952  
1953  	/*
1954  	 * O_RDWR avoids POLLHUPs, which is necessary to allow the other
1955  	 * end of a FIFO to be repeatedly opened and closed.
1956  	 */
1957  	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
1958  	if (fd < 0) {
1959  		pr_err("Failed to open '%s'\n", s);
1960  		ret = -errno;
1961  		goto out_free;
1962  	}
1963  	*ctl_fd = fd;
1964  	*ctl_fd_close = true;
1965  
1966  	if (p && *++p) {
1967  		/* O_RDWR | O_NONBLOCK means the other end need not be open */
1968  		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
1969  		if (fd < 0) {
1970  			pr_err("Failed to open '%s'\n", p);
1971  			ret = -errno;
1972  			goto out_free;
1973  		}
1974  		*ctl_fd_ack = fd;
1975  	}
1976  
1977  out_free:
1978  	free(s);
1979  	return ret;
1980  }
1981  
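/*
 * Parse a control specification. Two forms are accepted, a pair of
 * pre-opened file descriptors or a pair of FIFO paths (the ack element of
 * either pair is optional; see evlist__parse_control_fifo() above):
 *
 *	"fd:ctl-fd[,ack-fd]"		e.g. "fd:10,11"
 *	"fifo:ctl-path[,ack-path]"	e.g. "fifo:ctl.fifo,ack.fifo"
 *
 * A minimal usage sketch, with illustrative values:
 *
 *	int ctl_fd = -1, ctl_fd_ack = -1;
 *	bool ctl_fd_close = false;
 *
 *	if (evlist__parse_control("fd:10,11", &ctl_fd, &ctl_fd_ack, &ctl_fd_close))
 *		return -EINVAL;
 */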
1982  int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
1983  {
1984  	char *comma = NULL, *endptr = NULL;
1985  
1986  	*ctl_fd_close = false;
1987  
1988  	if (strncmp(str, "fd:", 3))
1989  		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);
1990  
1991  	*ctl_fd = strtoul(&str[3], &endptr, 0);
1992  	if (endptr == &str[3])
1993  		return -EINVAL;
1994  
1995  	comma = strchr(str, ',');
1996  	if (comma) {
1997  		if (endptr != comma)
1998  			return -EINVAL;
1999  
2000  		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
2001  		if (endptr == comma + 1 || *endptr != '\0')
2002  			return -EINVAL;
2003  	}
2004  
2005  	return 0;
2006  }
2007  
2008  void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
2009  {
2010  	if (*ctl_fd_close) {
2011  		*ctl_fd_close = false;
2012  		close(ctl_fd);
2013  		if (ctl_fd_ack >= 0)
2014  			close(ctl_fd_ack);
2015  	}
2016  }
2017  
2018  int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
2019  {
2020  	if (fd == -1) {
2021  		pr_debug("Control descriptor is not initialized\n");
2022  		return 0;
2023  	}
2024  
2025  	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
2026  						     fdarray_flag__nonfilterable |
2027  						     fdarray_flag__non_perf_event);
2028  	if (evlist->ctl_fd.pos < 0) {
2029  		evlist->ctl_fd.pos = -1;
2030  		pr_err("Failed to add ctl fd entry: %m\n");
2031  		return -1;
2032  	}
2033  
2034  	evlist->ctl_fd.fd = fd;
2035  	evlist->ctl_fd.ack = ack;
2036  
2037  	return 0;
2038  }
2039  
2040  bool evlist__ctlfd_initialized(struct evlist *evlist)
2041  {
2042  	return evlist->ctl_fd.pos >= 0;
2043  }
2044  
2045  int evlist__finalize_ctlfd(struct evlist *evlist)
2046  {
2047  	struct pollfd *entries = evlist->core.pollfd.entries;
2048  
2049  	if (!evlist__ctlfd_initialized(evlist))
2050  		return 0;
2051  
2052  	entries[evlist->ctl_fd.pos].fd = -1;
2053  	entries[evlist->ctl_fd.pos].events = 0;
2054  	entries[evlist->ctl_fd.pos].revents = 0;
2055  
2056  	evlist->ctl_fd.pos = -1;
2057  	evlist->ctl_fd.ack = -1;
2058  	evlist->ctl_fd.fd = -1;
2059  
2060  	return 0;
2061  }
2062  
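/*
 * Read one command from the control fd, a byte at a time, up to the
 * terminating '\n' or '\0'. Command tags are plain strings, so, assuming a
 * FIFO was set up with --control=fifo:ctl.fifo (illustrative path), a shell
 * can drive a running session with e.g.:
 *
 *	echo disable > ctl.fifo
 *	echo enable > ctl.fifo
 */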
2063  static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
2064  			      char *cmd_data, size_t data_size)
2065  {
2066  	int err;
2067  	char c;
2068  	size_t bytes_read = 0;
2069  
2070  	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
2071  	memset(cmd_data, 0, data_size);
2072  	data_size--;
2073  
2074  	do {
2075  		err = read(evlist->ctl_fd.fd, &c, 1);
2076  		if (err > 0) {
2077  			if (c == '\n' || c == '\0')
2078  				break;
2079  			cmd_data[bytes_read++] = c;
2080  			if (bytes_read == data_size)
2081  				break;
2082  			continue;
2083  		} else if (err == -1) {
2084  			if (errno == EINTR)
2085  				continue;
2086  			if (errno == EAGAIN || errno == EWOULDBLOCK)
2087  				err = 0;
2088  			else
2089  				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
2090  		}
2091  		break;
2092  	} while (1);
2093  
2094  	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
2095  		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
2096  
2097  	if (bytes_read > 0) {
2098  		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
2099  			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
2100  			*cmd = EVLIST_CTL_CMD_ENABLE;
2101  		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
2102  				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
2103  			*cmd = EVLIST_CTL_CMD_DISABLE;
2104  		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
2105  				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
2106  			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
2107  			pr_debug("is snapshot\n");
2108  		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
2109  				    (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
2110  			*cmd = EVLIST_CTL_CMD_EVLIST;
2111  		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
2112  				    (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
2113  			*cmd = EVLIST_CTL_CMD_STOP;
2114  		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
2115  				    (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
2116  			*cmd = EVLIST_CTL_CMD_PING;
2117  		}
2118  	}
2119  
2120  	return bytes_read ? (int)bytes_read : err;
2121  }
2122  
2123  int evlist__ctlfd_ack(struct evlist *evlist)
2124  {
2125  	int err;
2126  
2127  	if (evlist->ctl_fd.ack == -1)
2128  		return 0;
2129  
2130  	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
2131  		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
2132  	if (err == -1)
2133  		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
2134  
2135  	return err;
2136  }
2137  
2138  static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
2139  {
2140  	char *data = cmd_data + cmd_size;
2141  
2142  	/* no argument */
2143  	if (!*data)
2144  		return 0;
2145  
2146  	/* there's an argument */
2147  	if (*data == ' ') {
2148  		*arg = data + 1;
2149  		return 1;
2150  	}
2151  
2152  	/* malformed */
2153  	return -1;
2154  }
2155  
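/*
 * Handle the 'enable'/'disable' commands, which take an optional event name
 * argument, e.g. "enable cycles" (event name illustrative): with an argument
 * only the named event is toggled, otherwise the whole evlist is.
 */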
2156  static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
2157  {
2158  	struct evsel *evsel;
2159  	char *name;
2160  	int err;
2161  
2162  	err = get_cmd_arg(cmd_data,
2163  			  enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
2164  				   sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
2165  			  &name);
2166  	if (err < 0) {
2167  		pr_info("failed: wrong command\n");
2168  		return -1;
2169  	}
2170  
2171  	if (err) {
2172  		evsel = evlist__find_evsel_by_str(evlist, name);
2173  		if (evsel) {
2174  			if (enable)
2175  				evlist__enable_evsel(evlist, name);
2176  			else
2177  				evlist__disable_evsel(evlist, name);
2178  			pr_info("Event %s %s\n", evsel->name,
2179  				enable ? "enabled" : "disabled");
2180  		} else {
2181  			pr_info("failed: can't find '%s' event\n", name);
2182  		}
2183  	} else {
2184  		if (enable) {
2185  			evlist__enable(evlist);
2186  			pr_info(EVLIST_ENABLED_MSG);
2187  		} else {
2188  			evlist__disable(evlist);
2189  			pr_info(EVLIST_DISABLED_MSG);
2190  		}
2191  	}
2192  
2193  	return 0;
2194  }
2195  
2196  static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
2197  {
2198  	struct perf_attr_details details = { .verbose = false, };
2199  	struct evsel *evsel;
2200  	char *arg;
2201  	int err;
2202  
2203  	err = get_cmd_arg(cmd_data,
2204  			  sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
2205  			  &arg);
2206  	if (err < 0) {
2207  		pr_info("failed: wrong command\n");
2208  		return -1;
2209  	}
2210  
2211  	if (err) {
2212  		if (!strcmp(arg, "-v")) {
2213  			details.verbose = true;
2214  		} else if (!strcmp(arg, "-g")) {
2215  			details.event_group = true;
2216  		} else if (!strcmp(arg, "-F")) {
2217  			details.freq = true;
2218  		} else {
2219  			pr_info("failed: wrong command\n");
2220  			return -1;
2221  		}
2222  	}
2223  
2224  	evlist__for_each_entry(evlist, evsel)
2225  		evsel__fprintf(evsel, &details, stderr);
2226  
2227  	return 0;
2228  }
2229  
2230  int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
2231  {
2232  	int err = 0;
2233  	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
2234  	int ctlfd_pos = evlist->ctl_fd.pos;
2235  	struct pollfd *entries = evlist->core.pollfd.entries;
2236  
2237  	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
2238  		return 0;
2239  
2240  	if (entries[ctlfd_pos].revents & POLLIN) {
2241  		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
2242  					 EVLIST_CTL_CMD_MAX_LEN);
2243  		if (err > 0) {
2244  			switch (*cmd) {
2245  			case EVLIST_CTL_CMD_ENABLE:
2246  			case EVLIST_CTL_CMD_DISABLE:
2247  				err = evlist__ctlfd_enable(evlist, cmd_data,
2248  							   *cmd == EVLIST_CTL_CMD_ENABLE);
2249  				break;
2250  			case EVLIST_CTL_CMD_EVLIST:
2251  				err = evlist__ctlfd_list(evlist, cmd_data);
2252  				break;
2253  			case EVLIST_CTL_CMD_SNAPSHOT:
2254  			case EVLIST_CTL_CMD_STOP:
2255  			case EVLIST_CTL_CMD_PING:
2256  				break;
2257  			case EVLIST_CTL_CMD_ACK:
2258  			case EVLIST_CTL_CMD_UNSUPPORTED:
2259  			default:
2260  				pr_debug("ctlfd: unsupported %d\n", *cmd);
2261  				break;
2262  			}
2263  			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
2264  			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
2265  				evlist__ctlfd_ack(evlist);
2266  		}
2267  	}
2268  
2269  	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
2270  		evlist__finalize_ctlfd(evlist);
2271  	else
2272  		entries[ctlfd_pos].revents = 0;
2273  
2274  	return err;
2275  }
2276  
2277  /**
2278   * struct event_enable_time - perf record -D/--delay single time range.
2279   * @start: start of time range to enable events in milliseconds
2280   * @end: end of time range to enable events in milliseconds
2281   *
2282   * N.B. this structure is also accessed as an array of int.
2283   */
2284  struct event_enable_time {
2285  	int	start;
2286  	int	end;
2287  };
2288  
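/*
 * Parse one "start - end" range (milliseconds) of a -D/--delay
 * specification such as "10-20,30-40" (illustrative values), meaning events
 * are enabled at 10ms and 30ms and disabled at 20ms and 40ms. Ranges are
 * comma-separated, must not overlap and must end after they start; the %n
 * conversion yields the number of characters consumed.
 */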
2289  static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first)
2290  {
2291  	const char *fmt = first ? "%u - %u %n" : " , %u - %u %n";
2292  	int ret, start, end, n;
2293  
2294  	ret = sscanf(str, fmt, &start, &end, &n);
2295  	if (ret != 2 || end <= start)
2296  		return -EINVAL;
2297  	if (range) {
2298  		range->start = start;
2299  		range->end = end;
2300  	}
2301  	return n;
2302  }
2303  
2304  static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range)
2305  {
2306  	int incr = !!range;
2307  	bool first = true;
2308  	ssize_t ret, cnt;
2309  
2310  	for (cnt = 0; *str; cnt++) {
2311  		ret = parse_event_enable_time(str, range, first);
2312  		if (ret < 0)
2313  			return ret;
2314  		/* Check no overlap */
2315  		if (!first && range && range->start <= range[-1].end)
2316  			return -EINVAL;
2317  		str += ret;
2318  		range += incr;
2319  		first = false;
2320  	}
2321  	return cnt;
2322  }
2323  
2324  /**
2325   * struct event_enable_timer - control structure for perf record -D/--delay.
2326   * @evlist: event list
2327   * @times: time ranges that events are enabled (N.B. this is also accessed as an
2328   *         array of int)
2329   * @times_cnt: number of time ranges
2330   * @timerfd: timer file descriptor
2331   * @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray)
2332   * @times_step: current position in ((int *)@times)[],
2333   *              refer event_enable_timer__process()
2334   *
2335   * Note, this structure is only used when there are time ranges, not when there
2336   * is only an initial delay.
2337   */
2338  struct event_enable_timer {
2339  	struct evlist *evlist;
2340  	struct event_enable_time *times;
2341  	size_t	times_cnt;
2342  	int	timerfd;
2343  	int	pollfd_pos;
2344  	size_t	times_step;
2345  };
2346  
2347  static int str_to_delay(const char *str)
2348  {
2349  	char *endptr;
2350  	long d;
2351  
2352  	d = strtol(str, &endptr, 10);
2353  	if (*endptr || d > INT_MAX || d < -1)
2354  		return 0;
2355  	return d;
2356  }
2357  
2358  int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts,
2359  				    const char *str, int unset)
2360  {
2361  	enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event;
2362  	struct event_enable_timer *eet;
2363  	ssize_t times_cnt;
2364  	ssize_t ret;
2365  	int err;
2366  
2367  	if (unset)
2368  		return 0;
2369  
2370  	opts->target.initial_delay = str_to_delay(str);
2371  	if (opts->target.initial_delay)
2372  		return 0;
2373  
2374  	ret = parse_event_enable_times(str, NULL);
2375  	if (ret < 0)
2376  		return ret;
2377  
2378  	times_cnt = ret;
2379  	if (times_cnt == 0)
2380  		return -EINVAL;
2381  
2382  	eet = zalloc(sizeof(*eet));
2383  	if (!eet)
2384  		return -ENOMEM;
2385  
2386  	eet->times = calloc(times_cnt, sizeof(*eet->times));
2387  	if (!eet->times) {
2388  		err = -ENOMEM;
2389  		goto free_eet;
2390  	}
2391  
2392  	if (parse_event_enable_times(str, eet->times) != times_cnt) {
2393  		err = -EINVAL;
2394  		goto free_eet_times;
2395  	}
2396  
2397  	eet->times_cnt = times_cnt;
2398  
2399  	eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
2400  	if (eet->timerfd == -1) {
2401  		err = -errno;
2402  		pr_err("timerfd_create failed: %s\n", strerror(errno));
2403  		goto free_eet_times;
2404  	}
2405  
2406  	eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags);
2407  	if (eet->pollfd_pos < 0) {
2408  		err = eet->pollfd_pos;
2409  		goto close_timerfd;
2410  	}
2411  
2412  	eet->evlist = evlist;
2413  	evlist->eet = eet;
2414  	opts->target.initial_delay = eet->times[0].start;
2415  
2416  	return 0;
2417  
2418  close_timerfd:
2419  	close(eet->timerfd);
2420  free_eet_times:
2421  	zfree(&eet->times);
2422  free_eet:
2423  	free(eet);
2424  	return err;
2425  }
2426  
2427  static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms)
2428  {
2429  	struct itimerspec its = {
2430  		.it_value.tv_sec = ms / MSEC_PER_SEC,
2431  		.it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
2432  	};
2433  	int err = 0;
2434  
2435  	if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) {
2436  		err = -errno;
2437  		pr_err("timerfd_settime failed: %s\n", strerror(errno));
2438  	}
2439  	return err;
2440  }
2441  
2442  int event_enable_timer__start(struct event_enable_timer *eet)
2443  {
2444  	int ms;
2445  
2446  	if (!eet)
2447  		return 0;
2448  
2449  	ms = eet->times[0].end - eet->times[0].start;
2450  	eet->times_step = 1;
2451  
2452  	return event_enable_timer__set_timer(eet, ms);
2453  }
2454  
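/*
 * Walk the enable/disable schedule. With @times viewed as a flat int array,
 * even steps land on a range start (enable) and odd steps on a range end
 * (disable). E.g. for a hypothetical "-D 10-20,30-40":
 *
 *	(int *)times == { 10, 20, 30, 40 }
 *
 * step 1 fires 10ms (20 - 10) after the initial 10ms delay and disables
 * events, step 2 re-arms the timer for 30 - 20 = 10ms and enables them
 * again, and the timer is disarmed after the final range end.
 */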
2455  int event_enable_timer__process(struct event_enable_timer *eet)
2456  {
2457  	struct pollfd *entries;
2458  	short revents;
2459  
2460  	if (!eet)
2461  		return 0;
2462  
2463  	entries = eet->evlist->core.pollfd.entries;
2464  	revents = entries[eet->pollfd_pos].revents;
2465  	entries[eet->pollfd_pos].revents = 0;
2466  
2467  	if (revents & POLLIN) {
2468  		size_t step = eet->times_step;
2469  		size_t pos = step / 2;
2470  
2471  		if (step & 1) {
2472  			evlist__disable_non_dummy(eet->evlist);
2473  			pr_info(EVLIST_DISABLED_MSG);
2474  			if (pos >= eet->times_cnt - 1) {
2475  				/* Disarm timer */
2476  				event_enable_timer__set_timer(eet, 0);
2477  				return 1; /* Stop */
2478  			}
2479  		} else {
2480  			evlist__enable_non_dummy(eet->evlist);
2481  			pr_info(EVLIST_ENABLED_MSG);
2482  		}
2483  
2484  		step += 1;
2485  		pos = step / 2;
2486  
2487  		if (pos < eet->times_cnt) {
2488  			int *times = (int *)eet->times; /* Accessing 'times' as array of int */
2489  			int ms = times[step] - times[step - 1];
2490  
2491  			eet->times_step = step;
2492  			return event_enable_timer__set_timer(eet, ms);
2493  		}
2494  	}
2495  
2496  	return 0;
2497  }
2498  
2499  void event_enable_timer__exit(struct event_enable_timer **ep)
2500  {
2501  	if (!ep || !*ep)
2502  		return;
2503  	zfree(&(*ep)->times);
2504  	zfree(ep);
2505  }
2506  
2507  struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
2508  {
2509  	struct evsel *evsel;
2510  
2511  	evlist__for_each_entry(evlist, evsel) {
2512  		if (evsel->core.idx == idx)
2513  			return evsel;
2514  	}
2515  	return NULL;
2516  }
2517  
2518  int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
2519  {
2520  	struct evsel *evsel;
2521  	int printed = 0;
2522  
2523  	evlist__for_each_entry(evlist, evsel) {
2524  		if (evsel__is_dummy_event(evsel))
2525  			continue;
2526  		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
2527  			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
2528  		} else {
2529  			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
2530  			break;
2531  		}
2532  	}
2533  
2534  	return printed;
2535  }
2536  
2537  void evlist__check_mem_load_aux(struct evlist *evlist)
2538  {
2539  	struct evsel *leader, *evsel, *pos;
2540  
2541  	/*
2542  	 * On some platforms, the 'mem-loads' event must be used together
2543  	 * with 'mem-loads-aux' within a group, with 'mem-loads-aux' as the
2544  	 * group leader. Break the group up before reporting, because
2545  	 * 'mem-loads-aux' is just an auxiliary event: it doesn't carry
2546  	 * any valid memory load information.
2547  	 */
2548  	evlist__for_each_entry(evlist, evsel) {
2549  		leader = evsel__leader(evsel);
2550  		if (leader == evsel)
2551  			continue;
2552  
2553  		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
2554  			for_each_group_evsel(pos, leader) {
2555  				evsel__set_leader(pos, pos);
2556  				pos->core.nr_members = 0;
2557  			}
2558  		}
2559  	}
2560  }
2561  
2562  /**
2563   * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
2564   *     and warn if the user CPU list is inapplicable for the event's PMU's
2565   *     CPUs. Non-core PMUs list a CPU in sysfs, but this may be overridden by a
2566   *     user-requested CPU, in which case any online CPU is applicable. Core PMUs
2567   *     handle events on the CPUs in their list; otherwise the event isn't supported.
2568   * @evlist: The list of events being checked.
2569   * @cpu_list: The user provided list of CPUs.
2570   */
2571  void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
2572  {
2573  	struct perf_cpu_map *user_requested_cpus;
2574  	struct evsel *pos;
2575  
2576  	if (!cpu_list)
2577  		return;
2578  
2579  	user_requested_cpus = perf_cpu_map__new(cpu_list);
2580  	if (!user_requested_cpus)
2581  		return;
2582  
2583  	evlist__for_each_entry(evlist, pos) {
2584  		struct perf_cpu_map *intersect, *to_test;
2585  		const struct perf_pmu *pmu = evsel__find_pmu(pos);
2586  
2587  		to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
2588  		intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
2589  		if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
2590  			char buf[128];
2591  
2592  			cpu_map__snprint(to_test, buf, sizeof(buf));
2593  			pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
2594  				cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
2595  		}
2596  		perf_cpu_map__put(intersect);
2597  	}
2598  	perf_cpu_map__put(user_requested_cpus);
2599  }
2600  
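/*
 * On hybrid systems, rewrite ambiguous event names into explicit PMU syntax
 * so the output can be told apart, e.g. (assuming Intel hybrid PMU names)
 * "cycles:u" on the atom PMU becomes "cpu_atom/cycles/u".
 */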
2601  void evlist__uniquify_name(struct evlist *evlist)
2602  {
2603  	char *new_name, empty_attributes[2] = ":", *attributes;
2604  	struct evsel *pos;
2605  
2606  	if (perf_pmus__num_core_pmus() == 1)
2607  		return;
2608  
2609  	evlist__for_each_entry(evlist, pos) {
2610  		if (!evsel__is_hybrid(pos))
2611  			continue;
2612  
2613  		if (strchr(pos->name, '/'))
2614  			continue;
2615  
2616  		attributes = strchr(pos->name, ':');
2617  		if (attributes)
2618  			*attributes = '\0';
2619  		else
2620  			attributes = empty_attributes;
2621  
2622  		if (asprintf(&new_name, "%s/%s/%s", pos->pmu_name, pos->name, attributes + 1) >= 0) {
2623  			free(pos->name);
2624  			pos->name = new_name;
2625  		} else {
2626  			*attributes = ':';
2627  		}
2628  	}
2629  }
2630  
2631  bool evlist__has_bpf_output(struct evlist *evlist)
2632  {
2633  	struct evsel *evsel;
2634  
2635  	evlist__for_each_entry(evlist, evsel) {
2636  		if (evsel__is_bpf_output(evsel))
2637  			return true;
2638  	}
2639  
2640  	return false;
2641  }
2642