1  // SPDX-License-Identifier: GPL-2.0
2  #include <linux/hw_breakpoint.h>
3  #include <linux/err.h>
4  #include <linux/list_sort.h>
5  #include <linux/zalloc.h>
6  #include <dirent.h>
7  #include <errno.h>
8  #include <sys/ioctl.h>
9  #include <sys/param.h>
10  #include "term.h"
11  #include "env.h"
12  #include "evlist.h"
13  #include "evsel.h"
14  #include <subcmd/parse-options.h>
15  #include "parse-events.h"
16  #include "string2.h"
17  #include "strbuf.h"
18  #include "debug.h"
19  #include <api/fs/tracing_path.h>
20  #include <perf/cpumap.h>
21  #include <util/parse-events-bison.h>
22  #include <util/parse-events-flex.h>
23  #include "pmu.h"
24  #include "pmus.h"
25  #include "asm/bug.h"
26  #include "util/parse-branch-options.h"
27  #include "util/evsel_config.h"
28  #include "util/event.h"
29  #include "util/bpf-filter.h"
30  #include "util/util.h"
31  #include "tracepoint.h"
32  
33  #define MAX_NAME_LEN 100
34  
35  static int get_config_terms(const struct parse_events_terms *head_config,
36  			    struct list_head *head_terms);
37  static int parse_events_terms__copy(const struct parse_events_terms *src,
38  				    struct parse_events_terms *dest);
39  
40  const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
41  	[PERF_COUNT_HW_CPU_CYCLES] = {
42  		.symbol = "cpu-cycles",
43  		.alias  = "cycles",
44  	},
45  	[PERF_COUNT_HW_INSTRUCTIONS] = {
46  		.symbol = "instructions",
47  		.alias  = "",
48  	},
49  	[PERF_COUNT_HW_CACHE_REFERENCES] = {
50  		.symbol = "cache-references",
51  		.alias  = "",
52  	},
53  	[PERF_COUNT_HW_CACHE_MISSES] = {
54  		.symbol = "cache-misses",
55  		.alias  = "",
56  	},
57  	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
58  		.symbol = "branch-instructions",
59  		.alias  = "branches",
60  	},
61  	[PERF_COUNT_HW_BRANCH_MISSES] = {
62  		.symbol = "branch-misses",
63  		.alias  = "",
64  	},
65  	[PERF_COUNT_HW_BUS_CYCLES] = {
66  		.symbol = "bus-cycles",
67  		.alias  = "",
68  	},
69  	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
70  		.symbol = "stalled-cycles-frontend",
71  		.alias  = "idle-cycles-frontend",
72  	},
73  	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
74  		.symbol = "stalled-cycles-backend",
75  		.alias  = "idle-cycles-backend",
76  	},
77  	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
78  		.symbol = "ref-cycles",
79  		.alias  = "",
80  	},
81  };
82  
83  const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
84  	[PERF_COUNT_SW_CPU_CLOCK] = {
85  		.symbol = "cpu-clock",
86  		.alias  = "",
87  	},
88  	[PERF_COUNT_SW_TASK_CLOCK] = {
89  		.symbol = "task-clock",
90  		.alias  = "",
91  	},
92  	[PERF_COUNT_SW_PAGE_FAULTS] = {
93  		.symbol = "page-faults",
94  		.alias  = "faults",
95  	},
96  	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
97  		.symbol = "context-switches",
98  		.alias  = "cs",
99  	},
100  	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
101  		.symbol = "cpu-migrations",
102  		.alias  = "migrations",
103  	},
104  	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
105  		.symbol = "minor-faults",
106  		.alias  = "",
107  	},
108  	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
109  		.symbol = "major-faults",
110  		.alias  = "",
111  	},
112  	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
113  		.symbol = "alignment-faults",
114  		.alias  = "",
115  	},
116  	[PERF_COUNT_SW_EMULATION_FAULTS] = {
117  		.symbol = "emulation-faults",
118  		.alias  = "",
119  	},
120  	[PERF_COUNT_SW_DUMMY] = {
121  		.symbol = "dummy",
122  		.alias  = "",
123  	},
124  	[PERF_COUNT_SW_BPF_OUTPUT] = {
125  		.symbol = "bpf-output",
126  		.alias  = "",
127  	},
128  	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
129  		.symbol = "cgroup-switches",
130  		.alias  = "",
131  	},
132  };
133  
134  const char *event_type(int type)
135  {
136  	switch (type) {
137  	case PERF_TYPE_HARDWARE:
138  		return "hardware";
139  
140  	case PERF_TYPE_SOFTWARE:
141  		return "software";
142  
143  	case PERF_TYPE_TRACEPOINT:
144  		return "tracepoint";
145  
146  	case PERF_TYPE_HW_CACHE:
147  		return "hardware-cache";
148  
149  	default:
150  		break;
151  	}
152  
153  	return "unknown";
154  }
155  
156  static char *get_config_str(const struct parse_events_terms *head_terms,
157  			    enum parse_events__term_type type_term)
158  {
159  	struct parse_events_term *term;
160  
161  	if (!head_terms)
162  		return NULL;
163  
164  	list_for_each_entry(term, &head_terms->terms, list)
165  		if (term->type_term == type_term)
166  			return term->val.str;
167  
168  	return NULL;
169  }
170  
171  static char *get_config_metric_id(const struct parse_events_terms *head_terms)
172  {
173  	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
174  }
175  
176  static char *get_config_name(const struct parse_events_terms *head_terms)
177  {
178  	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
179  }
180  
181  /**
182   * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
183   *           matches the raw's string value. If the string value matches an
184   *           event then change the term to be an event, if not then change it to
185   *           be a config term. For example, "read" may be an event of the PMU or
186   *           a raw hex encoding of 0xead. The fix-up is done late so the PMU of
187   *           the event can be determined and we don't need to scan all PMUs
188   *           ahead-of-time.
189   * @config_terms: the list of terms that may contain a raw term.
190   * @pmu: the PMU to scan for events from.
191   */
192  static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
193  {
194  	struct parse_events_term *term;
195  
196  	list_for_each_entry(term, &config_terms->terms, list) {
197  		u64 num;
198  
199  		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
200  			continue;
201  
202  		if (perf_pmu__have_event(pmu, term->val.str)) {
203  			zfree(&term->config);
204  			term->config = term->val.str;
205  			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
206  			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
207  			term->val.num = 1;
208  			term->no_value = true;
209  			continue;
210  		}
211  
212  		zfree(&term->config);
213  		term->config = strdup("config");
214  		errno = 0;
215  		num = strtoull(term->val.str + 1, NULL, 16);
216  		assert(errno == 0);
217  		free(term->val.str);
218  		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
219  		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
220  		term->val.num = num;
221  		term->no_value = false;
222  	}
223  }
224  
225  static struct evsel *
226  __add_event(struct list_head *list, int *idx,
227  	    struct perf_event_attr *attr,
228  	    bool init_attr,
229  	    const char *name, const char *metric_id, struct perf_pmu *pmu,
230  	    struct list_head *config_terms, bool auto_merge_stats,
231  	    struct perf_cpu_map *cpu_list)
232  {
233  	struct evsel *evsel;
234  	struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ? pmu->cpus : cpu_list;
235  
236  	cpus = perf_cpu_map__get(cpus);
237  	if (pmu)
238  		perf_pmu__warn_invalid_formats(pmu);
239  
240  	if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
241  		perf_pmu__warn_invalid_config(pmu, attr->config, name,
242  					      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
243  		perf_pmu__warn_invalid_config(pmu, attr->config1, name,
244  					      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
245  		perf_pmu__warn_invalid_config(pmu, attr->config2, name,
246  					      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
247  		perf_pmu__warn_invalid_config(pmu, attr->config3, name,
248  					      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
249  	}
250  	if (init_attr)
251  		event_attr_init(attr);
252  
253  	evsel = evsel__new_idx(attr, *idx);
254  	if (!evsel) {
255  		perf_cpu_map__put(cpus);
256  		return NULL;
257  	}
258  
259  	(*idx)++;
260  	evsel->core.cpus = cpus;
261  	evsel->core.own_cpus = perf_cpu_map__get(cpus);
262  	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
263  	evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
264  	evsel->auto_merge_stats = auto_merge_stats;
265  	evsel->pmu = pmu;
266  	evsel->pmu_name = pmu ? strdup(pmu->name) : NULL;
267  
268  	if (name)
269  		evsel->name = strdup(name);
270  
271  	if (metric_id)
272  		evsel->metric_id = strdup(metric_id);
273  
274  	if (config_terms)
275  		list_splice_init(config_terms, &evsel->config_terms);
276  
277  	if (list)
278  		list_add_tail(&evsel->core.node, list);
279  
280  	return evsel;
281  }
282  
283  struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
284  				      const char *name, const char *metric_id,
285  				      struct perf_pmu *pmu)
286  {
287  	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
288  			   metric_id, pmu, /*config_terms=*/NULL,
289  			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
290  }
291  
292  static int add_event(struct list_head *list, int *idx,
293  		     struct perf_event_attr *attr, const char *name,
294  		     const char *metric_id, struct list_head *config_terms)
295  {
296  	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
297  			   /*pmu=*/NULL, config_terms,
298  			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
299  }
300  
301  static int add_event_tool(struct list_head *list, int *idx,
302  			  enum perf_tool_event tool_event)
303  {
304  	struct evsel *evsel;
305  	struct perf_event_attr attr = {
306  		.type = PERF_TYPE_SOFTWARE,
307  		.config = PERF_COUNT_SW_DUMMY,
308  	};
309  	struct perf_cpu_map *cpu_list = NULL;
310  
311  	if (tool_event == PERF_TOOL_DURATION_TIME) {
312  		/* Duration time is gathered globally, pretend it is only on CPU0. */
313  		cpu_list = perf_cpu_map__new("0");
314  	}
315  	evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
316  			    /*metric_id=*/NULL, /*pmu=*/NULL,
317  			    /*config_terms=*/NULL, /*auto_merge_stats=*/false,
318  			    cpu_list);
319  	perf_cpu_map__put(cpu_list);
320  	if (!evsel)
321  		return -ENOMEM;
322  	evsel->tool_event = tool_event;
323  	if (tool_event == PERF_TOOL_DURATION_TIME
324  	    || tool_event == PERF_TOOL_USER_TIME
325  	    || tool_event == PERF_TOOL_SYSTEM_TIME) {
326  		free((char *)evsel->unit);
327  		evsel->unit = strdup("ns");
328  	}
329  	return 0;
330  }
331  
332  /**
333   * parse_aliases - search names for entries beginning with or equal to str,
334   *                 ignoring case. If multiple entries in names match str then
335   *                 the longest is chosen.
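 *                 As an illustration (assuming "load" and "loads" are both
 *                 aliases of the same entry): for str "loads-misses" both
 *                 names match, *longest is set to strlen("loads"), and the
 *                 caller can step past the whole token.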
336   * @str: The needle to look for.
337   * @names: The haystack to search.
338   * @size: The size of the haystack.
339   * @longest: Out argument giving the length of the matching entry.
340   */
341  static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
342  			 int *longest)
343  {
344  	*longest = -1;
345  	for (int i = 0; i < size; i++) {
346  		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
347  			int n = strlen(names[i][j]);
348  
349  			if (n > *longest && !strncasecmp(str, names[i][j], n))
350  				*longest = n;
351  		}
352  		if (*longest > 0)
353  			return i;
354  	}
355  
356  	return -1;
357  }
358  
359  typedef int config_term_func_t(struct perf_event_attr *attr,
360  			       struct parse_events_term *term,
361  			       struct parse_events_error *err);
362  static int config_term_common(struct perf_event_attr *attr,
363  			      struct parse_events_term *term,
364  			      struct parse_events_error *err);
365  static int config_attr(struct perf_event_attr *attr,
366  		       const struct parse_events_terms *head,
367  		       struct parse_events_error *err,
368  		       config_term_func_t config_term);
369  
370  /**
371   * parse_events__decode_legacy_cache - Search name for the legacy cache event
372   *                                     name composed of 1, 2 or 3 hyphen
373   *                                     separated sections. The first section is
374   *                                     the cache type while the others are the
375   *                                     optional op and optional result. To make
376   *                                     life hard the names in the table also
377   *                                     contain hyphens and the longest name
378   *                                     should always be selected.
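 *                                     For example, "L1-dcache-load-misses"
 *                                     resolves to the "L1-dcache" type, the
 *                                     "load" op and the "misses" result,
 *                                     packed below as
 *                                     type | (op << 8) | (result << 16).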
379   */
380  int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
381  {
382  	int len, cache_type = -1, cache_op = -1, cache_result = -1;
383  	const char *name_end = &name[strlen(name) + 1];
384  	const char *str = name;
385  
386  	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
387  	if (cache_type == -1)
388  		return -EINVAL;
389  	str += len + 1;
390  
391  	if (str < name_end) {
392  		cache_op = parse_aliases(str, evsel__hw_cache_op,
393  					PERF_COUNT_HW_CACHE_OP_MAX, &len);
394  		if (cache_op >= 0) {
395  			if (!evsel__is_cache_op_valid(cache_type, cache_op))
396  				return -EINVAL;
397  			str += len + 1;
398  		} else {
399  			cache_result = parse_aliases(str, evsel__hw_cache_result,
400  						PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
401  			if (cache_result >= 0)
402  				str += len + 1;
403  		}
404  	}
405  	if (str < name_end) {
406  		if (cache_op < 0) {
407  			cache_op = parse_aliases(str, evsel__hw_cache_op,
408  						PERF_COUNT_HW_CACHE_OP_MAX, &len);
409  			if (cache_op >= 0) {
410  				if (!evsel__is_cache_op_valid(cache_type, cache_op))
411  					return -EINVAL;
412  			}
413  		} else if (cache_result < 0) {
414  			cache_result = parse_aliases(str, evsel__hw_cache_result,
415  						PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
416  		}
417  	}
418  
419  	/*
420  	 * Fall back to reads:
421  	 */
422  	if (cache_op == -1)
423  		cache_op = PERF_COUNT_HW_CACHE_OP_READ;
424  
425  	/*
426  	 * Fall back to accesses:
427  	 */
428  	if (cache_result == -1)
429  		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
430  
431  	*config = cache_type | (cache_op << 8) | (cache_result << 16);
432  	if (perf_pmus__supports_extended_type())
433  		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
434  	return 0;
435  }
436  
437  /**
438   * parse_events__filter_pmu - returns false if a wildcard PMU should be
439   *                            considered, true if it should be filtered.
440   */
441  bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
442  			      const struct perf_pmu *pmu)
443  {
444  	if (parse_state->pmu_filter == NULL)
445  		return false;
446  
447  	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
448  }
449  
450  static int parse_events_add_pmu(struct parse_events_state *parse_state,
451  				struct list_head *list, struct perf_pmu *pmu,
452  				const struct parse_events_terms *const_parsed_terms,
453  				bool auto_merge_stats);
454  
455  int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
456  			   struct parse_events_state *parse_state,
457  			   struct parse_events_terms *parsed_terms)
458  {
459  	struct perf_pmu *pmu = NULL;
460  	bool found_supported = false;
461  	const char *config_name = get_config_name(parsed_terms);
462  	const char *metric_id = get_config_metric_id(parsed_terms);
463  
464  	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
465  		LIST_HEAD(config_terms);
466  		struct perf_event_attr attr;
467  		int ret;
468  
469  		if (parse_events__filter_pmu(parse_state, pmu))
470  			continue;
471  
472  		if (perf_pmu__have_event(pmu, name)) {
473  			/*
474  			 * The PMU has the event, so add it as a PMU event
475  			 * rather than a legacy cache event.
476  			 */
477  			ret = parse_events_add_pmu(parse_state, list, pmu,
478  						   parsed_terms,
479  						   perf_pmu__auto_merge_stats(pmu));
480  			if (ret)
481  				return ret;
482  			continue;
483  		}
484  
485  		if (!pmu->is_core) {
486  			/* Legacy cache events are only supported by core PMUs. */
487  			continue;
488  		}
489  
490  		memset(&attr, 0, sizeof(attr));
491  		attr.type = PERF_TYPE_HW_CACHE;
492  
493  		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
494  		if (ret)
495  			return ret;
496  
497  		found_supported = true;
498  
499  		if (parsed_terms) {
500  			if (config_attr(&attr, parsed_terms, parse_state->error,
501  					config_term_common))
502  				return -EINVAL;
503  
504  			if (get_config_terms(parsed_terms, &config_terms))
505  				return -ENOMEM;
506  		}
507  
508  		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
509  				metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
510  				/*cpu_list=*/NULL) == NULL)
511  			return -ENOMEM;
512  
513  		free_config_terms(&config_terms);
514  	}
515  	return found_supported ? 0 : -EINVAL;
516  }
517  
518  #ifdef HAVE_LIBTRACEEVENT
519  static void tracepoint_error(struct parse_events_error *e, int err,
520  			     const char *sys, const char *name, int column)
521  {
522  	const char *str;
523  	char help[BUFSIZ];
524  
525  	if (!e)
526  		return;
527  
528  	/*
529  	 * The error comes either directly from a syscall's errno (> 0) or
530  	 * from an ERR_PTR-encoded pointer (< 0).
531  	 */
532  	err = abs(err);
533  
534  	switch (err) {
535  	case EACCES:
536  		str = "can't access trace events";
537  		break;
538  	case ENOENT:
539  		str = "unknown tracepoint";
540  		break;
541  	default:
542  		str = "failed to add tracepoint";
543  		break;
544  	}
545  
546  	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
547  	parse_events_error__handle(e, column, strdup(str), strdup(help));
548  }
549  
550  static int add_tracepoint(struct parse_events_state *parse_state,
551  			  struct list_head *list,
552  			  const char *sys_name, const char *evt_name,
553  			  struct parse_events_error *err,
554  			  struct parse_events_terms *head_config, void *loc_)
555  {
556  	YYLTYPE *loc = loc_;
557  	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
558  					       !parse_state->fake_tp);
559  
560  	if (IS_ERR(evsel)) {
561  		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
562  		return PTR_ERR(evsel);
563  	}
564  
565  	if (head_config) {
566  		LIST_HEAD(config_terms);
567  
568  		if (get_config_terms(head_config, &config_terms))
569  			return -ENOMEM;
570  		list_splice(&config_terms, &evsel->config_terms);
571  	}
572  
573  	list_add_tail(&evsel->core.node, list);
574  	return 0;
575  }
576  
577  static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
578  				      struct list_head *list,
579  				      const char *sys_name, const char *evt_name,
580  				      struct parse_events_error *err,
581  				      struct parse_events_terms *head_config, YYLTYPE *loc)
582  {
583  	char *evt_path;
584  	struct dirent *evt_ent;
585  	DIR *evt_dir;
586  	int ret = 0, found = 0;
587  
588  	evt_path = get_events_file(sys_name);
589  	if (!evt_path) {
590  		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
591  		return -1;
592  	}
593  	evt_dir = opendir(evt_path);
594  	if (!evt_dir) {
595  		put_events_file(evt_path);
596  		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
597  		return -1;
598  	}
599  
600  	while (!ret && (evt_ent = readdir(evt_dir))) {
601  		if (!strcmp(evt_ent->d_name, ".")
602  		    || !strcmp(evt_ent->d_name, "..")
603  		    || !strcmp(evt_ent->d_name, "enable")
604  		    || !strcmp(evt_ent->d_name, "filter"))
605  			continue;
606  
607  		if (!strglobmatch(evt_ent->d_name, evt_name))
608  			continue;
609  
610  		found++;
611  
612  		ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
613  				     err, head_config, loc);
614  	}
615  
616  	if (!found) {
617  		tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
618  		ret = -1;
619  	}
620  
621  	put_events_file(evt_path);
622  	closedir(evt_dir);
623  	return ret;
624  }
625  
626  static int add_tracepoint_event(struct parse_events_state *parse_state,
627  				struct list_head *list,
628  				const char *sys_name, const char *evt_name,
629  				struct parse_events_error *err,
630  				struct parse_events_terms *head_config, YYLTYPE *loc)
631  {
632  	return strpbrk(evt_name, "*?") ?
633  		add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
634  					   err, head_config, loc) :
635  		add_tracepoint(parse_state, list, sys_name, evt_name,
636  			       err, head_config, loc);
637  }
638  
639  static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
640  				    struct list_head *list,
641  				    const char *sys_name, const char *evt_name,
642  				    struct parse_events_error *err,
643  				    struct parse_events_terms *head_config, YYLTYPE *loc)
644  {
645  	struct dirent *events_ent;
646  	DIR *events_dir;
647  	int ret = 0;
648  
649  	events_dir = tracing_events__opendir();
650  	if (!events_dir) {
651  		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
652  		return -1;
653  	}
654  
655  	while (!ret && (events_ent = readdir(events_dir))) {
656  		if (!strcmp(events_ent->d_name, ".")
657  		    || !strcmp(events_ent->d_name, "..")
658  		    || !strcmp(events_ent->d_name, "enable")
659  		    || !strcmp(events_ent->d_name, "header_event")
660  		    || !strcmp(events_ent->d_name, "header_page"))
661  			continue;
662  
663  		if (!strglobmatch(events_ent->d_name, sys_name))
664  			continue;
665  
666  		ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
667  					   evt_name, err, head_config, loc);
668  	}
669  
670  	closedir(events_dir);
671  	return ret;
672  }
673  #endif /* HAVE_LIBTRACEEVENT */
674  
675  size_t default_breakpoint_len(void)
676  {
677  #if defined(__i386__)
678  	static int len;
679  
680  	if (len == 0) {
681  		struct perf_env env = {};
682  
683  		perf_env__init(&env);
684  		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
685  		perf_env__exit(&env);
686  	}
687  	return len;
688  #elif defined(__aarch64__)
689  	return 4;
690  #else
691  	return sizeof(long);
692  #endif
693  }
694  
695  static int
696  parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
697  {
698  	int i;
699  
700  	for (i = 0; i < 3; i++) {
701  		if (!type || !type[i])
702  			break;
703  
704  #define CHECK_SET_TYPE(bit)		\
705  do {					\
706  	if (attr->bp_type & bit)	\
707  		return -EINVAL;		\
708  	else				\
709  		attr->bp_type |= bit;	\
710  } while (0)
711  
712  		switch (type[i]) {
713  		case 'r':
714  			CHECK_SET_TYPE(HW_BREAKPOINT_R);
715  			break;
716  		case 'w':
717  			CHECK_SET_TYPE(HW_BREAKPOINT_W);
718  			break;
719  		case 'x':
720  			CHECK_SET_TYPE(HW_BREAKPOINT_X);
721  			break;
722  		default:
723  			return -EINVAL;
724  		}
725  	}
726  
727  #undef CHECK_SET_TYPE
728  
729  	if (!attr->bp_type) /* Default */
730  		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
731  
732  	return 0;
733  }
734  
735  int parse_events_add_breakpoint(struct parse_events_state *parse_state,
736  				struct list_head *list,
737  				u64 addr, char *type, u64 len,
738  				struct parse_events_terms *head_config)
739  {
740  	struct perf_event_attr attr;
741  	LIST_HEAD(config_terms);
742  	const char *name;
743  
744  	memset(&attr, 0, sizeof(attr));
745  	attr.bp_addr = addr;
746  
747  	if (parse_breakpoint_type(type, &attr))
748  		return -EINVAL;
749  
750  	/* Provide some defaults if len is not specified */
751  	if (!len) {
752  		if (attr.bp_type == HW_BREAKPOINT_X)
753  			len = default_breakpoint_len();
754  		else
755  			len = HW_BREAKPOINT_LEN_4;
756  	}
757  
758  	attr.bp_len = len;
759  
760  	attr.type = PERF_TYPE_BREAKPOINT;
761  	attr.sample_period = 1;
762  
763  	if (head_config) {
764  		if (config_attr(&attr, head_config, parse_state->error,
765  				config_term_common))
766  			return -EINVAL;
767  
768  		if (get_config_terms(head_config, &config_terms))
769  			return -ENOMEM;
770  	}
771  
772  	name = get_config_name(head_config);
773  
774  	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
775  			 &config_terms);
776  }
777  
778  static int check_type_val(struct parse_events_term *term,
779  			  struct parse_events_error *err,
780  			  enum parse_events__term_val_type type)
781  {
782  	if (type == term->type_val)
783  		return 0;
784  
785  	if (err) {
786  		parse_events_error__handle(err, term->err_val,
787  					type == PARSE_EVENTS__TERM_TYPE_NUM
788  					? strdup("expected numeric value")
789  					: strdup("expected string value"),
790  					NULL);
791  	}
792  	return -EINVAL;
793  }
794  
795  static bool config_term_shrinked;
796  
797  static const char *config_term_name(enum parse_events__term_type term_type)
798  {
799  	/*
800  	 * Update according to parse-events.l
801  	 */
802  	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
803  		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
804  		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
805  		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
806  		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
807  		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
808  		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
809  		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
810  		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
811  		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
812  		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
813  		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
814  		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
815  		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
816  		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
817  		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
818  		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
819  		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
820  		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
821  		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
822  		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
823  		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
824  		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
825  		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
826  		[PARSE_EVENTS__TERM_TYPE_RAW]                   = "raw",
827  		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE]          = "legacy-cache",
828  		[PARSE_EVENTS__TERM_TYPE_HARDWARE]              = "hardware",
829  	};
830  	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
831  		return "unknown term";
832  
833  	return config_term_names[term_type];
834  }
835  
836  static bool
837  config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
838  {
839  	char *err_str;
840  
841  	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
842  		parse_events_error__handle(err, -1,
843  					strdup("Invalid term_type"), NULL);
844  		return false;
845  	}
846  	if (!config_term_shrinked)
847  		return true;
848  
849  	switch (term_type) {
850  	case PARSE_EVENTS__TERM_TYPE_CONFIG:
851  	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
852  	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
853  	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
854  	case PARSE_EVENTS__TERM_TYPE_NAME:
855  	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
856  	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
857  	case PARSE_EVENTS__TERM_TYPE_PERCORE:
858  		return true;
859  	case PARSE_EVENTS__TERM_TYPE_USER:
860  	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
861  	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
862  	case PARSE_EVENTS__TERM_TYPE_TIME:
863  	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
864  	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
865  	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
866  	case PARSE_EVENTS__TERM_TYPE_INHERIT:
867  	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
868  	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
869  	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
870  	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
871  	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
872  	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
873  	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
874  	case PARSE_EVENTS__TERM_TYPE_RAW:
875  	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
876  	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
877  	default:
878  		if (!err)
879  			return false;
880  
881  		/* term_type is validated so indexing is safe */
882  		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
883  			     config_term_name(term_type)) >= 0)
884  			parse_events_error__handle(err, -1, err_str, NULL);
885  		return false;
886  	}
887  }
888  
889  void parse_events__shrink_config_terms(void)
890  {
891  	config_term_shrinked = true;
892  }
893  
894  static int config_term_common(struct perf_event_attr *attr,
895  			      struct parse_events_term *term,
896  			      struct parse_events_error *err)
897  {
898  #define CHECK_TYPE_VAL(type)						   \
899  do {									   \
900  	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
901  		return -EINVAL;						   \
902  } while (0)
903  
904  	switch (term->type_term) {
905  	case PARSE_EVENTS__TERM_TYPE_CONFIG:
906  		CHECK_TYPE_VAL(NUM);
907  		attr->config = term->val.num;
908  		break;
909  	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
910  		CHECK_TYPE_VAL(NUM);
911  		attr->config1 = term->val.num;
912  		break;
913  	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
914  		CHECK_TYPE_VAL(NUM);
915  		attr->config2 = term->val.num;
916  		break;
917  	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
918  		CHECK_TYPE_VAL(NUM);
919  		attr->config3 = term->val.num;
920  		break;
921  	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
922  		CHECK_TYPE_VAL(NUM);
923  		break;
924  	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
925  		CHECK_TYPE_VAL(NUM);
926  		break;
927  	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
928  		CHECK_TYPE_VAL(STR);
929  		if (strcmp(term->val.str, "no") &&
930  		    parse_branch_str(term->val.str,
931  				    &attr->branch_sample_type)) {
932  			parse_events_error__handle(err, term->err_val,
933  					strdup("invalid branch sample type"),
934  					NULL);
935  			return -EINVAL;
936  		}
937  		break;
938  	case PARSE_EVENTS__TERM_TYPE_TIME:
939  		CHECK_TYPE_VAL(NUM);
940  		if (term->val.num > 1) {
941  			parse_events_error__handle(err, term->err_val,
942  						strdup("expected 0 or 1"),
943  						NULL);
944  			return -EINVAL;
945  		}
946  		break;
947  	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
948  		CHECK_TYPE_VAL(STR);
949  		break;
950  	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
951  		CHECK_TYPE_VAL(NUM);
952  		break;
953  	case PARSE_EVENTS__TERM_TYPE_INHERIT:
954  		CHECK_TYPE_VAL(NUM);
955  		break;
956  	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
957  		CHECK_TYPE_VAL(NUM);
958  		break;
959  	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
960  		CHECK_TYPE_VAL(NUM);
961  		break;
962  	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
963  		CHECK_TYPE_VAL(NUM);
964  		break;
965  	case PARSE_EVENTS__TERM_TYPE_NAME:
966  		CHECK_TYPE_VAL(STR);
967  		break;
968  	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
969  		CHECK_TYPE_VAL(STR);
970  		break;
971  	case PARSE_EVENTS__TERM_TYPE_RAW:
972  		CHECK_TYPE_VAL(STR);
973  		break;
974  	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
975  		CHECK_TYPE_VAL(NUM);
976  		break;
977  	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
978  		CHECK_TYPE_VAL(NUM);
979  		break;
980  	case PARSE_EVENTS__TERM_TYPE_PERCORE:
981  		CHECK_TYPE_VAL(NUM);
982  		if ((unsigned int)term->val.num > 1) {
983  			parse_events_error__handle(err, term->err_val,
984  						strdup("expected 0 or 1"),
985  						NULL);
986  			return -EINVAL;
987  		}
988  		break;
989  	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
990  		CHECK_TYPE_VAL(NUM);
991  		break;
992  	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
993  		CHECK_TYPE_VAL(NUM);
994  		if (term->val.num > UINT_MAX) {
995  			parse_events_error__handle(err, term->err_val,
996  						strdup("too big"),
997  						NULL);
998  			return -EINVAL;
999  		}
1000  		break;
1001  	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1002  	case PARSE_EVENTS__TERM_TYPE_USER:
1003  	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1004  	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1005  	default:
1006  		parse_events_error__handle(err, term->err_term,
1007  					strdup(config_term_name(term->type_term)),
1008  					parse_events_formats_error_string(NULL));
1009  		return -EINVAL;
1010  	}
1011  
1012  	/*
1013  	 * Check term availability after basic checking so
1014  	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
1015  	 *
1016  	 * If we checked availability at the entry of this function, the
1017  	 * user would see "'<sysfs term>' is not usable in 'perf stat'"
1018  	 * if an invalid config term is provided for legacy events
1019  	 * (for example, instructions/badterm/...), which is confusing.
1020  	 */
1021  	if (!config_term_avail(term->type_term, err))
1022  		return -EINVAL;
1023  	return 0;
1024  #undef CHECK_TYPE_VAL
1025  }
1026  
1027  static int config_term_pmu(struct perf_event_attr *attr,
1028  			   struct parse_events_term *term,
1029  			   struct parse_events_error *err)
1030  {
1031  	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
1032  		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1033  
1034  		if (!pmu) {
1035  			char *err_str;
1036  
1037  			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1038  				parse_events_error__handle(err, term->err_term,
1039  							   err_str, /*help=*/NULL);
1040  			return -EINVAL;
1041  		}
1042  		/*
1043  		 * Rewrite the PMU event to a legacy cache one unless the PMU
1044  		 * doesn't support legacy cache events or the event is present
1045  		 * within the PMU.
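		 * For example, "cpu/L1-dcache-load-misses/" would typically be
		 * rewritten to a PERF_TYPE_HW_CACHE event here, unless the cpu
		 * PMU defines its own "L1-dcache-load-misses" event
		 * (illustrative; PMU and event names vary by platform).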
1046  		 */
1047  		if (perf_pmu__supports_legacy_cache(pmu) &&
1048  		    !perf_pmu__have_event(pmu, term->config)) {
1049  			attr->type = PERF_TYPE_HW_CACHE;
1050  			return parse_events__decode_legacy_cache(term->config, pmu->type,
1051  								 &attr->config);
1052  		} else {
1053  			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1054  			term->no_value = true;
1055  		}
1056  	}
1057  	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
1058  		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1059  
1060  		if (!pmu) {
1061  			char *err_str;
1062  
1063  			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1064  				parse_events_error__handle(err, term->err_term,
1065  							   err_str, /*help=*/NULL);
1066  			return -EINVAL;
1067  		}
1068  		/*
1069  		 * If the PMU has a sysfs or json event prefer it over
1070  		 * legacy. ARM requires this.
1071  		 */
1072  		if (perf_pmu__have_event(pmu, term->config)) {
1073  			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1074  			term->no_value = true;
1075  		} else {
1076  			attr->type = PERF_TYPE_HARDWARE;
1077  			attr->config = term->val.num;
1078  			if (perf_pmus__supports_extended_type())
1079  				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
1080  		}
1081  		return 0;
1082  	}
1083  	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
1084  	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
1085  		/*
1086  		 * Always succeed for sysfs terms, as we don't know
1087  		 * at this point what type they need to have.
1088  		 */
1089  		return 0;
1090  	}
1091  	return config_term_common(attr, term, err);
1092  }
1093  
1094  #ifdef HAVE_LIBTRACEEVENT
1095  static int config_term_tracepoint(struct perf_event_attr *attr,
1096  				  struct parse_events_term *term,
1097  				  struct parse_events_error *err)
1098  {
1099  	switch (term->type_term) {
1100  	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1101  	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1102  	case PARSE_EVENTS__TERM_TYPE_INHERIT:
1103  	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1104  	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1105  	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1106  	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1107  	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1108  	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1109  	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1110  		return config_term_common(attr, term, err);
1111  	case PARSE_EVENTS__TERM_TYPE_USER:
1112  	case PARSE_EVENTS__TERM_TYPE_CONFIG:
1113  	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1114  	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1115  	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1116  	case PARSE_EVENTS__TERM_TYPE_NAME:
1117  	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1118  	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1119  	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1120  	case PARSE_EVENTS__TERM_TYPE_TIME:
1121  	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1122  	case PARSE_EVENTS__TERM_TYPE_PERCORE:
1123  	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1124  	case PARSE_EVENTS__TERM_TYPE_RAW:
1125  	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1126  	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1127  	default:
1128  		if (err) {
1129  			parse_events_error__handle(err, term->err_term,
1130  						   strdup(config_term_name(term->type_term)),
1131  				strdup("valid terms: call-graph,stack-size\n"));
1132  		}
1133  		return -EINVAL;
1134  	}
1135  
1136  	return 0;
1137  }
1138  #endif
1139  
1140  static int config_attr(struct perf_event_attr *attr,
1141  		       const struct parse_events_terms *head,
1142  		       struct parse_events_error *err,
1143  		       config_term_func_t config_term)
1144  {
1145  	struct parse_events_term *term;
1146  
1147  	list_for_each_entry(term, &head->terms, list)
1148  		if (config_term(attr, term, err))
1149  			return -EINVAL;
1150  
1151  	return 0;
1152  }
1153  
1154  static int get_config_terms(const struct parse_events_terms *head_config,
1155  			    struct list_head *head_terms)
1156  {
1157  #define ADD_CONFIG_TERM(__type, __weak)				\
1158  	struct evsel_config_term *__t;			\
1159  								\
1160  	__t = zalloc(sizeof(*__t));				\
1161  	if (!__t)						\
1162  		return -ENOMEM;					\
1163  								\
1164  	INIT_LIST_HEAD(&__t->list);				\
1165  	__t->type       = EVSEL__CONFIG_TERM_ ## __type;	\
1166  	__t->weak	= __weak;				\
1167  	list_add_tail(&__t->list, head_terms)
1168  
1169  #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
1170  do {								\
1171  	ADD_CONFIG_TERM(__type, __weak);			\
1172  	__t->val.__name = __val;				\
1173  } while (0)
1174  
1175  #define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
1176  do {								\
1177  	ADD_CONFIG_TERM(__type, __weak);			\
1178  	__t->val.str = strdup(__val);				\
1179  	if (!__t->val.str) {					\
1180  		zfree(&__t);					\
1181  		return -ENOMEM;					\
1182  	}							\
1183  	__t->free_str = true;					\
1184  } while (0)
1185  
1186  	struct parse_events_term *term;
1187  
1188  	list_for_each_entry(term, &head_config->terms, list) {
1189  		switch (term->type_term) {
1190  		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1191  			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
1192  			break;
1193  		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1194  			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
1195  			break;
1196  		case PARSE_EVENTS__TERM_TYPE_TIME:
1197  			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
1198  			break;
1199  		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1200  			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
1201  			break;
1202  		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1203  			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
1204  			break;
1205  		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1206  			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
1207  					    term->val.num, term->weak);
1208  			break;
1209  		case PARSE_EVENTS__TERM_TYPE_INHERIT:
1210  			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1211  					    term->val.num ? 1 : 0, term->weak);
1212  			break;
1213  		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1214  			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1215  					    term->val.num ? 0 : 1, term->weak);
1216  			break;
1217  		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1218  			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
1219  					    term->val.num, term->weak);
1220  			break;
1221  		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1222  			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
1223  					    term->val.num, term->weak);
1224  			break;
1225  		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1226  			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1227  					    term->val.num ? 1 : 0, term->weak);
1228  			break;
1229  		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1230  			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1231  					    term->val.num ? 0 : 1, term->weak);
1232  			break;
1233  		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1234  			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
1235  			break;
1236  		case PARSE_EVENTS__TERM_TYPE_PERCORE:
1237  			ADD_CONFIG_TERM_VAL(PERCORE, percore,
1238  					    term->val.num ? true : false, term->weak);
1239  			break;
1240  		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1241  			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
1242  					    term->val.num ? 1 : 0, term->weak);
1243  			break;
1244  		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1245  			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
1246  					    term->val.num, term->weak);
1247  			break;
1248  		case PARSE_EVENTS__TERM_TYPE_USER:
1249  		case PARSE_EVENTS__TERM_TYPE_CONFIG:
1250  		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1251  		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1252  		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1253  		case PARSE_EVENTS__TERM_TYPE_NAME:
1254  		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1255  		case PARSE_EVENTS__TERM_TYPE_RAW:
1256  		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1257  		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1258  		default:
1259  			break;
1260  		}
1261  	}
1262  	return 0;
1263  }
1264  
1265  /*
1266   * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
1267   * each bit of attr->config that the user has changed.
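 * For example, "config=0x10" marks every bit of attr->config as changed,
 * while a sysfs format term such as "event=0x3" (assuming the PMU's "event"
 * field maps onto config) marks only the bits that field occupies.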
1268   */
1269  static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
1270  			   struct list_head *head_terms)
1271  {
1272  	struct parse_events_term *term;
1273  	u64 bits = 0;
1274  	int type;
1275  
1276  	list_for_each_entry(term, &head_config->terms, list) {
1277  		switch (term->type_term) {
1278  		case PARSE_EVENTS__TERM_TYPE_USER:
1279  			type = perf_pmu__format_type(pmu, term->config);
1280  			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
1281  				continue;
1282  			bits |= perf_pmu__format_bits(pmu, term->config);
1283  			break;
1284  		case PARSE_EVENTS__TERM_TYPE_CONFIG:
1285  			bits = ~(u64)0;
1286  			break;
1287  		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1288  		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1289  		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1290  		case PARSE_EVENTS__TERM_TYPE_NAME:
1291  		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1292  		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1293  		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1294  		case PARSE_EVENTS__TERM_TYPE_TIME:
1295  		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1296  		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1297  		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1298  		case PARSE_EVENTS__TERM_TYPE_INHERIT:
1299  		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1300  		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1301  		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1302  		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1303  		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1304  		case PARSE_EVENTS__TERM_TYPE_PERCORE:
1305  		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1306  		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1307  		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1308  		case PARSE_EVENTS__TERM_TYPE_RAW:
1309  		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1310  		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1311  		default:
1312  			break;
1313  		}
1314  	}
1315  
1316  	if (bits)
1317  		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
1318  
1319  #undef ADD_CONFIG_TERM
1320  	return 0;
1321  }
1322  
1323  int parse_events_add_tracepoint(struct parse_events_state *parse_state,
1324  				struct list_head *list,
1325  				const char *sys, const char *event,
1326  				struct parse_events_error *err,
1327  				struct parse_events_terms *head_config, void *loc_)
1328  {
1329  	YYLTYPE *loc = loc_;
1330  #ifdef HAVE_LIBTRACEEVENT
1331  	if (head_config) {
1332  		struct perf_event_attr attr;
1333  
1334  		if (config_attr(&attr, head_config, err,
1335  				config_term_tracepoint))
1336  			return -EINVAL;
1337  	}
1338  
1339  	if (strpbrk(sys, "*?"))
1340  		return add_tracepoint_multi_sys(parse_state, list, sys, event,
1341  						err, head_config, loc);
1342  	else
1343  		return add_tracepoint_event(parse_state, list, sys, event,
1344  					    err, head_config, loc);
1345  #else
1346  	(void)parse_state;
1347  	(void)list;
1348  	(void)sys;
1349  	(void)event;
1350  	(void)head_config;
1351  	parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
1352  				strdup("libtraceevent is necessary for tracepoint support"));
1353  	return -1;
1354  #endif
1355  }
1356  
1357  static int __parse_events_add_numeric(struct parse_events_state *parse_state,
1358  				struct list_head *list,
1359  				struct perf_pmu *pmu, u32 type, u32 extended_type,
1360  				u64 config, const struct parse_events_terms *head_config)
1361  {
1362  	struct perf_event_attr attr;
1363  	LIST_HEAD(config_terms);
1364  	const char *name, *metric_id;
1365  	int ret;
1366  
1367  	memset(&attr, 0, sizeof(attr));
1368  	attr.type = type;
1369  	attr.config = config;
1370  	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
1371  		assert(perf_pmus__supports_extended_type());
1372  		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
1373  	}
1374  
1375  	if (head_config) {
1376  		if (config_attr(&attr, head_config, parse_state->error,
1377  				config_term_common))
1378  			return -EINVAL;
1379  
1380  		if (get_config_terms(head_config, &config_terms))
1381  			return -ENOMEM;
1382  	}
1383  
1384  	name = get_config_name(head_config);
1385  	metric_id = get_config_metric_id(head_config);
1386  	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
1387  			metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
1388  			/*cpu_list=*/NULL) ? 0 : -ENOMEM;
1389  	free_config_terms(&config_terms);
1390  	return ret;
1391  }
1392  
1393  int parse_events_add_numeric(struct parse_events_state *parse_state,
1394  			     struct list_head *list,
1395  			     u32 type, u64 config,
1396  			     const struct parse_events_terms *head_config,
1397  			     bool wildcard)
1398  {
1399  	struct perf_pmu *pmu = NULL;
1400  	bool found_supported = false;
1401  
1402  	/* Wildcards on numeric values are only supported by core PMUs. */
1403  	if (wildcard && perf_pmus__supports_extended_type()) {
1404  		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
1405  			int ret;
1406  
1407  			found_supported = true;
1408  			if (parse_events__filter_pmu(parse_state, pmu))
1409  				continue;
1410  
1411  			ret = __parse_events_add_numeric(parse_state, list, pmu,
1412  							 type, pmu->type,
1413  							 config, head_config);
1414  			if (ret)
1415  				return ret;
1416  		}
1417  		if (found_supported)
1418  			return 0;
1419  	}
1420  	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
1421  					type, /*extended_type=*/0, config, head_config);
1422  }
1423  
1424  int parse_events_add_tool(struct parse_events_state *parse_state,
1425  			  struct list_head *list,
1426  			  int tool_event)
1427  {
1428  	return add_event_tool(list, &parse_state->idx, tool_event);
1429  }
1430  
1431  static bool config_term_percore(struct list_head *config_terms)
1432  {
1433  	struct evsel_config_term *term;
1434  
1435  	list_for_each_entry(term, config_terms, list) {
1436  		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
1437  			return term->val.percore;
1438  	}
1439  
1440  	return false;
1441  }
1442  
1443  static int parse_events_add_pmu(struct parse_events_state *parse_state,
1444  				struct list_head *list, struct perf_pmu *pmu,
1445  				const struct parse_events_terms *const_parsed_terms,
1446  				bool auto_merge_stats)
1447  {
1448  	struct perf_event_attr attr;
1449  	struct perf_pmu_info info;
1450  	struct evsel *evsel;
1451  	struct parse_events_error *err = parse_state->error;
1452  	LIST_HEAD(config_terms);
1453  	struct parse_events_terms parsed_terms;
1454  	bool alias_rewrote_terms = false;
1455  
1456  	if (verbose > 1) {
1457  		struct strbuf sb;
1458  
1459  		strbuf_init(&sb, /*hint=*/ 0);
1460  		if (pmu->selectable && const_parsed_terms &&
1461  		    list_empty(&const_parsed_terms->terms)) {
1462  			strbuf_addf(&sb, "%s//", pmu->name);
1463  		} else {
1464  			strbuf_addf(&sb, "%s/", pmu->name);
1465  			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
1466  			strbuf_addch(&sb, '/');
1467  		}
1468  		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
1469  		strbuf_release(&sb);
1470  	}
1471  
1472  	memset(&attr, 0, sizeof(attr));
1473  	if (pmu->perf_event_attr_init_default)
1474  		pmu->perf_event_attr_init_default(pmu, &attr);
1475  
1476  	attr.type = pmu->type;
1477  
1478  	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
1479  		evsel = __add_event(list, &parse_state->idx, &attr,
1480  				    /*init_attr=*/true, /*name=*/NULL,
1481  				    /*metric_id=*/NULL, pmu,
1482  				    /*config_terms=*/NULL, auto_merge_stats,
1483  				    /*cpu_list=*/NULL);
1484  		return evsel ? 0 : -ENOMEM;
1485  	}
1486  
1487  	parse_events_terms__init(&parsed_terms);
1488  	if (const_parsed_terms) {
1489  		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
1490  
1491  		if (ret)
1492  			return ret;
1493  	}
1494  	fix_raw(&parsed_terms, pmu);
1495  
1496  	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
1497  	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
1498  		parse_events_terms__exit(&parsed_terms);
1499  		return -EINVAL;
1500  	}
1501  
1502  	/* Look for event names in the terms and rewrite into format based terms. */
1503  	if (perf_pmu__check_alias(pmu, &parsed_terms,
1504  				  &info, &alias_rewrote_terms, err)) {
1505  		parse_events_terms__exit(&parsed_terms);
1506  		return -EINVAL;
1507  	}
1508  
1509  	if (verbose > 1) {
1510  		struct strbuf sb;
1511  
1512  		strbuf_init(&sb, /*hint=*/ 0);
1513  		parse_events_terms__to_strbuf(&parsed_terms, &sb);
1514  		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
1515  		strbuf_release(&sb);
1516  	}
1517  
1518  	/* Configure attr/terms again if an alias was expanded. */
1519  	if (alias_rewrote_terms &&
1520  	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
1521  		parse_events_terms__exit(&parsed_terms);
1522  		return -EINVAL;
1523  	}
1524  
1525  	if (get_config_terms(&parsed_terms, &config_terms)) {
1526  		parse_events_terms__exit(&parsed_terms);
1527  		return -ENOMEM;
1528  	}
1529  
1530  	/*
1531  	 * When using default config, record which bits of attr->config were
1532  	 * changed by the user.
1533  	 */
1534  	if (pmu->perf_event_attr_init_default &&
1535  	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
1536  		parse_events_terms__exit(&parsed_terms);
1537  		return -ENOMEM;
1538  	}
1539  
1540  	if (perf_pmu__config(pmu, &attr, &parsed_terms, parse_state->error)) {
1541  		free_config_terms(&config_terms);
1542  		parse_events_terms__exit(&parsed_terms);
1543  		return -EINVAL;
1544  	}
1545  
1546  	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
1547  			    get_config_name(&parsed_terms),
1548  			    get_config_metric_id(&parsed_terms), pmu,
1549  			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
1550  	if (!evsel) {
1551  		parse_events_terms__exit(&parsed_terms);
1552  		return -ENOMEM;
1553  	}
1554  
1555  	if (evsel->name)
1556  		evsel->use_config_name = true;
1557  
1558  	evsel->percore = config_term_percore(&evsel->config_terms);
1559  
1560  	parse_events_terms__exit(&parsed_terms);
1561  	free((char *)evsel->unit);
1562  	evsel->unit = strdup(info.unit);
1563  	evsel->scale = info.scale;
1564  	evsel->per_pkg = info.per_pkg;
1565  	evsel->snapshot = info.snapshot;
1566  	return 0;
1567  }
1568  
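/*
 * Add an event known only by name, for example an alias that may be
 * provided by more than one PMU. The name is appended to the term list
 * as a valueless user term so that parse_events_add_pmu() can resolve
 * it against each PMU's aliases, and the event is added once for every
 * PMU that advertises it (and on the fake PMU if one is in use).
 */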
1569  int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1570  			       const char *event_name,
1571  			       const struct parse_events_terms *const_parsed_terms,
1572  			       struct list_head **listp, void *loc_)
1573  {
1574  	struct parse_events_term *term;
1575  	struct list_head *list = NULL;
1576  	struct perf_pmu *pmu = NULL;
1577  	YYLTYPE *loc = loc_;
1578  	int ok = 0;
1579  	const char *config;
1580  	struct parse_events_terms parsed_terms;
1581  
1582  	*listp = NULL;
1583  
1584  	parse_events_terms__init(&parsed_terms);
1585  	if (const_parsed_terms) {
1586  		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
1587  
1588  		if (ret)
1589  			return ret;
1590  	}
1591  
1592  	config = strdup(event_name);
1593  	if (!config)
1594  		goto out_err;
1595  
1596  	if (parse_events_term__num(&term,
1597  				   PARSE_EVENTS__TERM_TYPE_USER,
1598  				   config, /*num=*/1, /*novalue=*/true,
1599  				   loc, /*loc_val=*/NULL) < 0) {
1600  		zfree(&config);
1601  		goto out_err;
1602  	}
1603  	list_add_tail(&term->list, &parsed_terms.terms);
1604  
1605  	/* Add it for all PMUs that support the alias */
1606  	list = malloc(sizeof(struct list_head));
1607  	if (!list)
1608  		goto out_err;
1609  
1610  	INIT_LIST_HEAD(list);
1611  
1612  	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
1613  		bool auto_merge_stats;
1614  
1615  		if (parse_events__filter_pmu(parse_state, pmu))
1616  			continue;
1617  
1618  		if (!perf_pmu__have_event(pmu, event_name))
1619  			continue;
1620  
1621  		auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
1622  		if (!parse_events_add_pmu(parse_state, list, pmu,
1623  					  &parsed_terms, auto_merge_stats)) {
1624  			struct strbuf sb;
1625  
1626  			strbuf_init(&sb, /*hint=*/ 0);
1627  			parse_events_terms__to_strbuf(&parsed_terms, &sb);
1628  			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
1629  			strbuf_release(&sb);
1630  			ok++;
1631  		}
1632  	}
1633  
1634  	if (parse_state->fake_pmu) {
1635  		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
1636  					  /*auto_merge_stats=*/true)) {
1637  			struct strbuf sb;
1638  
1639  			strbuf_init(&sb, /*hint=*/ 0);
1640  			parse_events_terms__to_strbuf(&parsed_terms, &sb);
1641  			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
1642  			strbuf_release(&sb);
1643  			ok++;
1644  		}
1645  	}
1646  
1647  out_err:
1648  	parse_events_terms__exit(&parsed_terms);
1649  	if (ok)
1650  		*listp = list;
1651  	else
1652  		free(list);
1653  
1654  	return ok ? 0 : -1;
1655  }
1656  
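/*
 * Resolution order for a token that may name either a PMU or an event:
 * first try it as an exact PMU name (and on the fake PMU if in use),
 * then as a wildcard match against PMU names, and finally fall back to
 * treating it as an event name added on every PMU that has it via
 * parse_events_multi_pmu_add().
 */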
1657  int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
1658  					const char *event_or_pmu,
1659  					const struct parse_events_terms *const_parsed_terms,
1660  					struct list_head **listp,
1661  					void *loc_)
1662  {
1663  	YYLTYPE *loc = loc_;
1664  	struct perf_pmu *pmu;
1665  	int ok = 0;
1666  	char *help;
1667  
1668  	*listp = malloc(sizeof(**listp));
1669  	if (!*listp)
1670  		return -ENOMEM;
1671  
1672  	INIT_LIST_HEAD(*listp);
1673  
1674  	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
1675  	pmu = perf_pmus__find(event_or_pmu);
1676  	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
1677  					/*auto_merge_stats=*/false))
1678  		return 0;
1679  
1680  	if (parse_state->fake_pmu) {
1681  		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
1682  					  const_parsed_terms,
1683  					  /*auto_merge_stats=*/false))
1684  			return 0;
1685  	}
1686  
1687  	pmu = NULL;
1688  	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
1689  	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
1690  		if (!parse_events__filter_pmu(parse_state, pmu) &&
1691  		    perf_pmu__match(pmu, event_or_pmu)) {
1692  			bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
1693  
1694  			if (!parse_events_add_pmu(parse_state, *listp, pmu,
1695  						  const_parsed_terms,
1696  						  auto_merge_stats)) {
1697  				ok++;
1698  				parse_state->wild_card_pmus = true;
1699  			}
1700  		}
1701  	}
1702  	if (ok)
1703  		return 0;
1704  
1705  	/* Failure to add, assume event_or_pmu is an event name. */
1706  	zfree(listp);
1707  	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, const_parsed_terms, listp, loc))
1708  		return 0;
1709  
1710  	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
1711  		help = NULL;
1712  	parse_events_error__handle(parse_state->error, loc->first_column,
1713  				strdup("Bad event or PMU"),
1714  				help);
1715  	zfree(listp);
1716  	return -EINVAL;
1717  }
1718  
1719  void parse_events__set_leader(char *name, struct list_head *list)
1720  {
1721  	struct evsel *leader;
1722  
1723  	if (list_empty(list)) {
1724  		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1725  		return;
1726  	}
1727  
1728  	leader = list_first_entry(list, struct evsel, core.node);
1729  	__perf_evlist__set_leader(list, &leader->core);
1730  	zfree(&leader->group_name);
1731  	leader->group_name = name;
1732  }
1733  
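/*
 * Illustrative effect of the translation below for a single ungrouped
 * event with no prior excludes: "instructions:u" ends up with
 * exclude_kernel = exclude_hv = 1 and exclude_user = 0, while
 * "instructions:k" ends up with exclude_user = exclude_hv = 1 and
 * exclude_kernel = 0.
 */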
1734  static int parse_events__modifier_list(struct parse_events_state *parse_state,
1735  				       YYLTYPE *loc,
1736  				       struct list_head *list,
1737  				       struct parse_events_modifier mod,
1738  				       bool group)
1739  {
1740  	struct evsel *evsel;
1741  
1742  	if (!group && mod.weak) {
1743  		parse_events_error__handle(parse_state->error, loc->first_column,
1744  					   strdup("Weak modifier is for use with groups"), NULL);
1745  		return -EINVAL;
1746  	}
1747  
1748  	__evlist__for_each_entry(list, evsel) {
1749  		/* Translate modifiers into the equivalent evsel excludes. */
1750  		int eu = group ? evsel->core.attr.exclude_user : 0;
1751  		int ek = group ? evsel->core.attr.exclude_kernel : 0;
1752  		int eh = group ? evsel->core.attr.exclude_hv : 0;
1753  		int eH = group ? evsel->core.attr.exclude_host : 0;
1754  		int eG = group ? evsel->core.attr.exclude_guest : 0;
1755  		int exclude = eu | ek | eh;
1756  		int exclude_GH = group ? evsel->exclude_GH : 0;
1757  
1758  		if (mod.precise) {
1759  			/* use of precise requires exclude_guest */
1760  			eG = 1;
1761  		}
1762  		if (mod.user) {
1763  			if (!exclude)
1764  				exclude = eu = ek = eh = 1;
1765  			if (!exclude_GH && !perf_guest)
1766  				eG = 1;
1767  			eu = 0;
1768  		}
1769  		if (mod.kernel) {
1770  			if (!exclude)
1771  				exclude = eu = ek = eh = 1;
1772  			ek = 0;
1773  		}
1774  		if (mod.hypervisor) {
1775  			if (!exclude)
1776  				exclude = eu = ek = eh = 1;
1777  			eh = 0;
1778  		}
1779  		if (mod.guest) {
1780  			if (!exclude_GH)
1781  				exclude_GH = eG = eH = 1;
1782  			eG = 0;
1783  		}
1784  		if (mod.host) {
1785  			if (!exclude_GH)
1786  				exclude_GH = eG = eH = 1;
1787  			eH = 0;
1788  		}
1789  		evsel->core.attr.exclude_user   = eu;
1790  		evsel->core.attr.exclude_kernel = ek;
1791  		evsel->core.attr.exclude_hv     = eh;
1792  		evsel->core.attr.exclude_host   = eH;
1793  		evsel->core.attr.exclude_guest  = eG;
1794  		evsel->exclude_GH               = exclude_GH;
1795  
1796  		/* Simple modifiers copied to the evsel. */
1797  		if (mod.precise) {
1798  			u8 precise = evsel->core.attr.precise_ip + mod.precise;
1799  			/*
1800  			 * precise ip:
1801  			 *
1802  			 *  0 - SAMPLE_IP can have arbitrary skid
1803  			 *  1 - SAMPLE_IP must have constant skid
1804  			 *  2 - SAMPLE_IP requested to have 0 skid
1805  			 *  3 - SAMPLE_IP must have 0 skid
1806  			 *
1807  			 *  See also PERF_RECORD_MISC_EXACT_IP
1808  			 */
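			/* e.g. a ":pp" modifier adds 2 to the existing precise_ip value. */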
1809  			if (precise > 3) {
1810  				char *help;
1811  
1812  				if (asprintf(&help,
1813  					     "Maximum combined precise value is 3, adding precision to \"%s\"",
1814  					     evsel__name(evsel)) > 0) {
1815  					parse_events_error__handle(parse_state->error,
1816  								   loc->first_column,
1817  								   help, NULL);
1818  				}
1819  				return -EINVAL;
1820  			}
1821  			evsel->core.attr.precise_ip = precise;
1822  		}
1823  		if (mod.precise_max)
1824  			evsel->precise_max = 1;
1825  		if (mod.non_idle)
1826  			evsel->core.attr.exclude_idle = 1;
1827  		if (mod.sample_read)
1828  			evsel->sample_read = 1;
1829  		if (mod.pinned && evsel__is_group_leader(evsel))
1830  			evsel->core.attr.pinned = 1;
1831  		if (mod.exclusive && evsel__is_group_leader(evsel))
1832  			evsel->core.attr.exclusive = 1;
1833  		if (mod.weak)
1834  			evsel->weak_group = true;
1835  		if (mod.bpf)
1836  			evsel->bpf_counter = true;
1837  		if (mod.retire_lat)
1838  			evsel->retire_lat = true;
1839  	}
1840  	return 0;
1841  }
1842  
1843  int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
1844  				 struct list_head *list,
1845  				 struct parse_events_modifier mod)
1846  {
1847  	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
1848  }
1849  
1850  int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
1851  				 struct list_head *list,
1852  				 struct parse_events_modifier mod)
1853  {
1854  	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
1855  }
1856  
1857  int parse_events__set_default_name(struct list_head *list, char *name)
1858  {
1859  	struct evsel *evsel;
1860  	bool used_name = false;
1861  
1862  	__evlist__for_each_entry(list, evsel) {
1863  		if (!evsel->name) {
1864  			evsel->name = used_name ? strdup(name) : name;
1865  			used_name = true;
1866  			if (!evsel->name)
1867  				return -ENOMEM;
1868  		}
1869  	}
1870  	if (!used_name)
1871  		free(name);
1872  	return 0;
1873  }
1874  
1875  static int parse_events__scanner(const char *str,
1876  				 FILE *input,
1877  				 struct parse_events_state *parse_state)
1878  {
1879  	YY_BUFFER_STATE buffer;
1880  	void *scanner;
1881  	int ret;
1882  
1883  	ret = parse_events_lex_init_extra(parse_state, &scanner);
1884  	if (ret)
1885  		return ret;
1886  
1887  	if (str)
1888  		buffer = parse_events__scan_string(str, scanner);
1889  	else
1890  		parse_events_set_in(input, scanner);
1891  
1892  #ifdef PARSER_DEBUG
1893  	parse_events_debug = 1;
1894  	parse_events_set_debug(1, scanner);
1895  #endif
1896  	ret = parse_events_parse(parse_state, scanner);
1897  
1898  	if (str) {
1899  		parse_events__flush_buffer(buffer, scanner);
1900  		parse_events__delete_buffer(buffer, scanner);
1901  	}
1902  	parse_events_lex_destroy(scanner);
1903  	return ret;
1904  }
1905  
1906  /*
1907   * parse event config string, return a list of event terms.
1908   */
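/*
 * Illustrative example: the string "call-graph=fp,period=1000" yields
 * two terms appended to @terms, a string term (call-graph=fp) and a
 * numeric term (period=1000).
 */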
1909  int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
1910  {
1911  	struct parse_events_state parse_state = {
1912  		.terms  = NULL,
1913  		.stoken = PE_START_TERMS,
1914  	};
1915  	int ret;
1916  
1917  	ret = parse_events__scanner(str, input, &parse_state);
1918  	if (!ret)
1919  		list_splice(&parse_state.terms->terms, &terms->terms);
1920  
1921  	zfree(&parse_state.terms);
1922  	return ret;
1923  }
1924  
1925  static int evsel__compute_group_pmu_name(struct evsel *evsel,
1926  					  const struct list_head *head)
1927  {
1928  	struct evsel *leader = evsel__leader(evsel);
1929  	struct evsel *pos;
1930  	const char *group_pmu_name;
1931  	struct perf_pmu *pmu = evsel__find_pmu(evsel);
1932  
1933  	if (!pmu) {
1934  		/*
1935  		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
1936  		 * is a core PMU, but in heterogeneous systems this is
1937  		 * unknown. For now pick the first core PMU.
1938  		 */
1939  		pmu = perf_pmus__scan_core(NULL);
1940  	}
1941  	if (!pmu) {
1942  		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
1943  		return -EINVAL;
1944  	}
1945  	group_pmu_name = pmu->name;
1946  	/*
1947  	 * Software events may be in a group with other uncore PMU events. Use
1948  	 * the pmu_name of the first non-software event to avoid breaking the
1949  	 * software event out of the group.
1950  	 *
1951  	 * Aux event leaders, like intel_pt, expect a group with events from
1952  	 * other PMUs, so substitute the AUX event's PMU in this case.
1953  	 */
1954  	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
1955  		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);
1956  
1957  		if (!leader_pmu) {
1958  			/* As with determining pmu above. */
1959  			leader_pmu = perf_pmus__scan_core(NULL);
1960  		}
1961  		/*
1962  		 * Starting with the leader, find the first event with a named
1963  		 * non-software PMU. for_each_group_(member|evsel) isn't used as
1964  		 * the list isn't yet sorted to put evsels in the same group
1965  		 * together.
1966  		 */
1967  		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
1968  			group_pmu_name = leader_pmu->name;
1969  		} else if (leader->core.nr_members > 1) {
1970  			list_for_each_entry(pos, head, core.node) {
1971  				struct perf_pmu *pos_pmu;
1972  
1973  				if (pos == leader || evsel__leader(pos) != leader)
1974  					continue;
1975  				pos_pmu = evsel__find_pmu(pos);
1976  				if (!pos_pmu) {
1977  					/* As with determining pmu above. */
1978  					pos_pmu = perf_pmus__scan_core(NULL);
1979  				}
1980  				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
1981  					group_pmu_name = pos_pmu->name;
1982  					break;
1983  				}
1984  			}
1985  		}
1986  	}
1987  	/* Record computed name. */
1988  	evsel->group_pmu_name = strdup(group_pmu_name);
1989  	return evsel->group_pmu_name ? 0 : -ENOMEM;
1990  }
1991  
1992  __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
1993  {
1994  	/* Order by insertion index. */
1995  	return lhs->core.idx - rhs->core.idx;
1996  }
1997  
1998  static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
1999  {
2000  	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
2001  	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
2002  	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
2003  	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
2004  	int *force_grouped_idx = _fg_idx;
2005  	int lhs_sort_idx, rhs_sort_idx, ret;
2006  	const char *lhs_pmu_name, *rhs_pmu_name;
2007  	bool lhs_has_group, rhs_has_group;
2008  
2009  	/*
2010  	 * First sort by grouping/leader. Read the leader idx only if the evsel
2011  	 * is part of a group, by default ungrouped events will be sorted
2012  	 * relative to grouped events based on where the first ungrouped event
2013  	 * occurs. If neither event has a group we want to fall through to
2014  	 * the arch-specific sorting, which can reorder and fix things like
2015  	 * Intel's topdown events.
2016  	 */
2017  	if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
2018  		lhs_has_group = true;
2019  		lhs_sort_idx = lhs_core->leader->idx;
2020  	} else {
2021  		lhs_has_group = false;
2022  		lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
2023  			? *force_grouped_idx
2024  			: lhs_core->idx;
2025  	}
2026  	if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
2027  		rhs_has_group = true;
2028  		rhs_sort_idx = rhs_core->leader->idx;
2029  	} else {
2030  		rhs_has_group = false;
2031  		rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
2032  			? *force_grouped_idx
2033  			: rhs_core->idx;
2034  	}
2035  
2036  	if (lhs_sort_idx != rhs_sort_idx)
2037  		return lhs_sort_idx - rhs_sort_idx;
2038  
2039  	/* Group by PMU if there is a group. Groups can't span PMUs. */
2040  	if (lhs_has_group && rhs_has_group) {
2041  		lhs_pmu_name = lhs->group_pmu_name;
2042  		rhs_pmu_name = rhs->group_pmu_name;
2043  		ret = strcmp(lhs_pmu_name, rhs_pmu_name);
2044  		if (ret)
2045  			return ret;
2046  	}
2047  
2048  	/* Architecture specific sorting. */
2049  	return arch_evlist__cmp(lhs, rhs);
2050  }
2051  
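/*
 * Sort the evlist and recompute groups. Returns a negative value on
 * error, 1 if the user's ordering or the number of group leaders
 * changed, and 0 otherwise; callers use a positive return to warn that
 * events were regrouped.
 */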
2052  static int parse_events__sort_events_and_fix_groups(struct list_head *list)
2053  {
2054  	int idx = 0, force_grouped_idx = -1;
2055  	struct evsel *pos, *cur_leader = NULL;
2056  	struct perf_evsel *cur_leaders_grp = NULL;
2057  	bool idx_changed = false, cur_leader_force_grouped = false;
2058  	int orig_num_leaders = 0, num_leaders = 0;
2059  	int ret;
2060  
2061  	/*
2062  	 * Compute index to insert ungrouped events at. Place them where the
2063  	 * first ungrouped event appears.
2064  	 */
2065  	list_for_each_entry(pos, list, core.node) {
2066  		const struct evsel *pos_leader = evsel__leader(pos);
2067  
2068  		ret = evsel__compute_group_pmu_name(pos, list);
2069  		if (ret)
2070  			return ret;
2071  
2072  		if (pos == pos_leader)
2073  			orig_num_leaders++;
2074  
2075  		/*
2076  		 * Ensure indexes are sequential, in particular for multiple
2077  		 * event lists being merged. The indexes are used to detect when
2078  		 * the user order is modified.
2079  		 */
2080  		pos->core.idx = idx++;
2081  
2082  		/* Remember an index to sort all forced grouped events together to. */
2083  		if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
2084  		    arch_evsel__must_be_in_group(pos))
2085  			force_grouped_idx = pos->core.idx;
2086  	}
2087  
2088  	/* Sort events. */
2089  	list_sort(&force_grouped_idx, list, evlist__cmp);
2090  
2091  	/*
2092  	 * Recompute groups, splitting for PMUs and adding groups for events
2093  	 * that require them.
2094  	 */
2095  	idx = 0;
2096  	list_for_each_entry(pos, list, core.node) {
2097  		const struct evsel *pos_leader = evsel__leader(pos);
2098  		const char *pos_pmu_name = pos->group_pmu_name;
2099  		const char *cur_leader_pmu_name;
2100  		bool pos_force_grouped = force_grouped_idx != -1 &&
2101  			arch_evsel__must_be_in_group(pos);
2102  
2103  		/* Reset index and nr_members. */
2104  		if (pos->core.idx != idx)
2105  			idx_changed = true;
2106  		pos->core.idx = idx++;
2107  		pos->core.nr_members = 0;
2108  
2109  		/*
2110  		 * Set the group leader respecting the given groupings and that
2111  		 * groups can't span PMUs.
2112  		 */
2113  		if (!cur_leader)
2114  			cur_leader = pos;
2115  
2116  		cur_leader_pmu_name = cur_leader->group_pmu_name;
2117  		if ((cur_leaders_grp != pos->core.leader &&
2118  		     (!pos_force_grouped || !cur_leader_force_grouped)) ||
2119  		    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
2120  			/* Event is for a different group/PMU than last. */
2121  			cur_leader = pos;
2122  			/*
2123  			 * Remember the leader's group before it is overwritten,
2124  			 * so that later events match as being in the same
2125  			 * group.
2126  			 */
2127  			cur_leaders_grp = pos->core.leader;
2128  			/*
2129  			 * Avoid forcing events into groups with events that
2130  			 * don't need to be in the group.
2131  			 */
2132  			cur_leader_force_grouped = pos_force_grouped;
2133  		}
2134  		if (pos_leader != cur_leader) {
2135  			/* The leader changed so update it. */
2136  			evsel__set_leader(pos, cur_leader);
2137  		}
2138  	}
2139  	list_for_each_entry(pos, list, core.node) {
2140  		struct evsel *pos_leader = evsel__leader(pos);
2141  
2142  		if (pos == pos_leader)
2143  			num_leaders++;
2144  		pos_leader->core.nr_members++;
2145  	}
2146  	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
2147  }
2148  
2149  int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
2150  		   struct parse_events_error *err, bool fake_pmu,
2151  		   bool warn_if_reordered, bool fake_tp)
2152  {
2153  	struct parse_events_state parse_state = {
2154  		.list	  = LIST_HEAD_INIT(parse_state.list),
2155  		.idx	  = evlist->core.nr_entries,
2156  		.error	  = err,
2157  		.stoken	  = PE_START_EVENTS,
2158  		.fake_pmu = fake_pmu,
2159  		.fake_tp  = fake_tp,
2160  		.pmu_filter = pmu_filter,
2161  		.match_legacy_cache_terms = true,
2162  	};
2163  	int ret, ret2;
2164  
2165  	ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);
2166  
2167  	if (!ret && list_empty(&parse_state.list)) {
2168  		WARN_ONCE(true, "WARNING: event parser found nothing\n");
2169  		return -1;
2170  	}
2171  
2172  	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
2173  	if (ret2 < 0)
2174  		return ret;
2175  
2176  	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
2177  		pr_warning("WARNING: events were regrouped to match PMUs\n");
2178  
2179  	/*
2180  	 * Add list to the evlist even with errors to allow callers to clean up.
2181  	 */
2182  	evlist__splice_list_tail(evlist, &parse_state.list);
2183  
2184  	if (!ret) {
2185  		struct evsel *last;
2186  
2187  		last = evlist__last(evlist);
2188  		last->cmdline_group_boundary = true;
2189  
2190  		return 0;
2191  	}
2192  
2193  	/*
2194  	 * There are 2 users - builtin-record and builtin-test objects.
2195  	 * Both call evlist__delete in case of error, so we don't
2196  	 * need to bother.
2197  	 */
2198  	return ret;
2199  }
2200  
2201  int parse_event(struct evlist *evlist, const char *str)
2202  {
2203  	struct parse_events_error err;
2204  	int ret;
2205  
2206  	parse_events_error__init(&err);
2207  	ret = parse_events(evlist, str, &err);
2208  	parse_events_error__exit(&err);
2209  	return ret;
2210  }
2211  
2212  struct parse_events_error_entry {
2213  	/** @list: The list the error is part of. */
2214  	struct list_head list;
2215  	/** @idx: index in the parsed string */
2216  	int   idx;
2217  	/** @str: string to display at the index */
2218  	char *str;
2219  	/** @help: optional help string */
2220  	char *help;
2221  };
2222  
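/*
 * Typical error-handling lifecycle (illustrative, mirroring
 * parse_event() above):
 *
 *	struct parse_events_error err;
 *
 *	parse_events_error__init(&err);
 *	if (parse_events(evlist, str, &err))
 *		parse_events_error__print(&err, str);
 *	parse_events_error__exit(&err);
 *
 * parse_events_error__handle() takes ownership of the strings it is
 * given, freeing them if they cannot be recorded.
 */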
2223  void parse_events_error__init(struct parse_events_error *err)
2224  {
2225  	INIT_LIST_HEAD(&err->list);
2226  }
2227  
2228  void parse_events_error__exit(struct parse_events_error *err)
2229  {
2230  	struct parse_events_error_entry *pos, *tmp;
2231  
2232  	list_for_each_entry_safe(pos, tmp, &err->list, list) {
2233  		zfree(&pos->str);
2234  		zfree(&pos->help);
2235  		list_del_init(&pos->list);
2236  		free(pos);
2237  	}
2238  }
2239  
2240  void parse_events_error__handle(struct parse_events_error *err, int idx,
2241  				char *str, char *help)
2242  {
2243  	struct parse_events_error_entry *entry;
2244  
2245  	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
2246  		goto out_free;
2247  
2248  	entry = zalloc(sizeof(*entry));
2249  	if (!entry) {
2250  		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
2251  			str, help ?: "<no help>");
2252  		goto out_free;
2253  	}
2254  	entry->idx = idx;
2255  	entry->str = str;
2256  	entry->help = help;
2257  	list_add(&entry->list, &err->list);
2258  	return;
2259  out_free:
2260  	free(str);
2261  	free(help);
2262  }
2263  
2264  #define MAX_WIDTH 1000
2265  static int get_term_width(void)
2266  {
2267  	struct winsize ws;
2268  
2269  	get_term_dimensions(&ws);
2270  	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2271  }
2272  
2273  static void __parse_events_error__print(int err_idx, const char *err_str,
2274  					const char *err_help, const char *event)
2275  {
2276  	const char *str = "invalid or unsupported event: ";
2277  	char _buf[MAX_WIDTH];
2278  	char *buf = (char *) event;
2279  	int idx = 0;
2280  	if (err_str) {
2281  		/* -2 for extra '' in the final fprintf */
2282  		int width       = get_term_width() - 2;
2283  		int len_event   = strlen(event);
2284  		int len_str, max_len, cut = 0;
2285  
2286  		/*
2287  		 * Maximum error index indent; we will cut
2288  		 * the event string if it's bigger.
2289  		 */
2290  		int max_err_idx = 13;
2291  
2292  		/*
2293  		 * Let's be specific with the message when
2294  		 * we have the precise error.
2295  		 */
2296  		str     = "event syntax error: ";
2297  		len_str = strlen(str);
2298  		max_len = width - len_str;
2299  
2300  		buf = _buf;
2301  
2302  		/* We're cutting from the beginning. */
2303  		if (err_idx > max_err_idx)
2304  			cut = err_idx - max_err_idx;
2305  
2306  		strncpy(buf, event + cut, max_len);
2307  
2308  		/* Mark cut parts with '..' on both sides. */
2309  		if (cut)
2310  			buf[0] = buf[1] = '.';
2311  
2312  		if ((len_event - cut) > max_len) {
2313  			buf[max_len - 1] = buf[max_len - 2] = '.';
2314  			buf[max_len] = 0;
2315  		}
2316  
2317  		idx = len_str + err_idx - cut;
2318  	}
2319  
2320  	fprintf(stderr, "%s'%s'\n", str, buf);
2321  	if (idx) {
2322  		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2323  		if (err_help)
2324  			fprintf(stderr, "\n%s\n", err_help);
2325  	}
2326  }
2327  
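/*
 * The output printed for each recorded error is roughly of the form
 * (illustrative):
 *
 *	event syntax error: 'cycles:xyz'
 *	                           \___ parser error string
 *
 * with overly long event strings cut down and the removed parts marked
 * by '..'.
 */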
2328  void parse_events_error__print(const struct parse_events_error *err,
2329  			       const char *event)
2330  {
2331  	struct parse_events_error_entry *pos;
2332  	bool first = true;
2333  
2334  	list_for_each_entry(pos, &err->list, list) {
2335  		if (!first)
2336  			fputs("\n", stderr);
2337  		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
2338  		first = false;
2339  	}
2340  }
2341  
2342  /*
2343   * In the list of errors err, do any of the error strings (str) contain the
2344   * given needle string?
2345   */
2346  bool parse_events_error__contains(const struct parse_events_error *err,
2347  				  const char *needle)
2348  {
2349  	struct parse_events_error_entry *pos;
2350  
2351  	list_for_each_entry(pos, &err->list, list) {
2352  		if (strstr(pos->str, needle) != NULL)
2353  			return true;
2354  	}
2355  	return false;
2356  }
2357  
2358  #undef MAX_WIDTH
2359  
2360  int parse_events_option(const struct option *opt, const char *str,
2361  			int unset __maybe_unused)
2362  {
2363  	struct parse_events_option_args *args = opt->value;
2364  	struct parse_events_error err;
2365  	int ret;
2366  
2367  	parse_events_error__init(&err);
2368  	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
2369  			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
2370  			     /*fake_tp=*/false);
2371  
2372  	if (ret) {
2373  		parse_events_error__print(&err, str);
2374  		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2375  	}
2376  	parse_events_error__exit(&err);
2377  
2378  	return ret;
2379  }
2380  
2381  int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2382  {
2383  	struct parse_events_option_args *args = opt->value;
2384  	int ret;
2385  
2386  	if (*args->evlistp == NULL) {
2387  		*args->evlistp = evlist__new();
2388  
2389  		if (*args->evlistp == NULL) {
2390  			fprintf(stderr, "Not enough memory to create evlist\n");
2391  			return -1;
2392  		}
2393  	}
2394  	ret = parse_events_option(opt, str, unset);
2395  	if (ret) {
2396  		evlist__delete(*args->evlistp);
2397  		*args->evlistp = NULL;
2398  	}
2399  
2400  	return ret;
2401  }
2402  
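/*
 * Apply @func to each evsel added by the most recent event
 * specification, walking backwards from the last evsel until the
 * previous cmdline_group_boundary (or the head of the evlist) is
 * reached. If the evlist is empty, @func is called once with a NULL
 * evsel so that it can report the error itself.
 */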
2403  static int
2404  foreach_evsel_in_last_glob(struct evlist *evlist,
2405  			   int (*func)(struct evsel *evsel,
2406  				       const void *arg),
2407  			   const void *arg)
2408  {
2409  	struct evsel *last = NULL;
2410  	int err;
2411  
2412  	/*
2413  	 * Don't return when list_empty; give func a chance to report
2414  	 * an error when it finds last == NULL.
2415  	 *
2416  	 * So there is no need to WARN here, let *func do this.
2417  	 */
2418  	if (evlist->core.nr_entries > 0)
2419  		last = evlist__last(evlist);
2420  
2421  	do {
2422  		err = (*func)(last, arg);
2423  		if (err)
2424  			return -1;
2425  		if (!last)
2426  			return 0;
2427  
2428  		if (last->core.node.prev == &evlist->core.entries)
2429  			return 0;
2430  		last = list_entry(last->core.node.prev, struct evsel, core.node);
2431  	} while (!last->cmdline_group_boundary);
2432  
2433  	return 0;
2434  }
2435  
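/*
 * --filter handling: tracepoint events take the string as a tracepoint
 * filter, events on a PMU that exposes nr_addr_filters take it as an
 * address filter, and everything else falls back to a BPF filter.
 */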
2436  static int set_filter(struct evsel *evsel, const void *arg)
2437  {
2438  	const char *str = arg;
2439  	bool found = false;
2440  	int nr_addr_filters = 0;
2441  	struct perf_pmu *pmu = NULL;
2442  
2443  	if (evsel == NULL) {
2444  		fprintf(stderr,
2445  			"--filter option should follow a -e tracepoint or HW tracer option\n");
2446  		return -1;
2447  	}
2448  
2449  	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2450  		if (evsel__append_tp_filter(evsel, str) < 0) {
2451  			fprintf(stderr,
2452  				"not enough memory to hold filter string\n");
2453  			return -1;
2454  		}
2455  
2456  		return 0;
2457  	}
2458  
2459  	while ((pmu = perf_pmus__scan(pmu)) != NULL)
2460  		if (pmu->type == evsel->core.attr.type) {
2461  			found = true;
2462  			break;
2463  		}
2464  
2465  	if (found)
2466  		perf_pmu__scan_file(pmu, "nr_addr_filters",
2467  				    "%d", &nr_addr_filters);
2468  
2469  	if (!nr_addr_filters)
2470  		return perf_bpf_filter__parse(&evsel->bpf_filters, str);
2471  
2472  	if (evsel__append_addr_filter(evsel, str) < 0) {
2473  		fprintf(stderr,
2474  			"not enough memory to hold filter string\n");
2475  		return -1;
2476  	}
2477  
2478  	return 0;
2479  }
2480  
2481  int parse_filter(const struct option *opt, const char *str,
2482  		 int unset __maybe_unused)
2483  {
2484  	struct evlist *evlist = *(struct evlist **)opt->value;
2485  
2486  	return foreach_evsel_in_last_glob(evlist, set_filter,
2487  					  (const void *)str);
2488  }
2489  
2490  static int add_exclude_perf_filter(struct evsel *evsel,
2491  				   const void *arg __maybe_unused)
2492  {
2493  	char new_filter[64];
2494  
2495  	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2496  		fprintf(stderr,
2497  			"--exclude-perf option should follow a -e tracepoint option\n");
2498  		return -1;
2499  	}
2500  
2501  	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2502  
2503  	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2504  		fprintf(stderr,
2505  			"not enough memory to hold filter string\n");
2506  		return -1;
2507  	}
2508  
2509  	return 0;
2510  }
2511  
2512  int exclude_perf(const struct option *opt,
2513  		 const char *arg __maybe_unused,
2514  		 int unset __maybe_unused)
2515  {
2516  	struct evlist *evlist = *(struct evlist **)opt->value;
2517  
2518  	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2519  					  NULL);
2520  }
2521  
2522  int parse_events__is_hardcoded_term(struct parse_events_term *term)
2523  {
2524  	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
2525  }
2526  
2527  static int new_term(struct parse_events_term **_term,
2528  		    struct parse_events_term *temp,
2529  		    char *str, u64 num)
2530  {
2531  	struct parse_events_term *term;
2532  
2533  	term = malloc(sizeof(*term));
2534  	if (!term)
2535  		return -ENOMEM;
2536  
2537  	*term = *temp;
2538  	INIT_LIST_HEAD(&term->list);
2539  	term->weak = false;
2540  
2541  	switch (term->type_val) {
2542  	case PARSE_EVENTS__TERM_TYPE_NUM:
2543  		term->val.num = num;
2544  		break;
2545  	case PARSE_EVENTS__TERM_TYPE_STR:
2546  		term->val.str = str;
2547  		break;
2548  	default:
2549  		free(term);
2550  		return -EINVAL;
2551  	}
2552  
2553  	*_term = term;
2554  	return 0;
2555  }
2556  
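/*
 * Ownership notes for the term constructors below:
 * parse_events_term__num() duplicates a default config name when
 * @config is NULL, while parse_events_term__str() stores @config and
 * @str directly; in both cases the resulting term owns its strings and
 * frees them in parse_events_term__delete().
 */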
2557  int parse_events_term__num(struct parse_events_term **term,
2558  			   enum parse_events__term_type type_term,
2559  			   const char *config, u64 num,
2560  			   bool no_value,
2561  			   void *loc_term_, void *loc_val_)
2562  {
2563  	YYLTYPE *loc_term = loc_term_;
2564  	YYLTYPE *loc_val = loc_val_;
2565  
2566  	struct parse_events_term temp = {
2567  		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
2568  		.type_term = type_term,
2569  		.config    = config ? : strdup(config_term_name(type_term)),
2570  		.no_value  = no_value,
2571  		.err_term  = loc_term ? loc_term->first_column : 0,
2572  		.err_val   = loc_val  ? loc_val->first_column  : 0,
2573  	};
2574  
2575  	return new_term(term, &temp, /*str=*/NULL, num);
2576  }
2577  
2578  int parse_events_term__str(struct parse_events_term **term,
2579  			   enum parse_events__term_type type_term,
2580  			   char *config, char *str,
2581  			   void *loc_term_, void *loc_val_)
2582  {
2583  	YYLTYPE *loc_term = loc_term_;
2584  	YYLTYPE *loc_val = loc_val_;
2585  
2586  	struct parse_events_term temp = {
2587  		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
2588  		.type_term = type_term,
2589  		.config    = config,
2590  		.err_term  = loc_term ? loc_term->first_column : 0,
2591  		.err_val   = loc_val  ? loc_val->first_column  : 0,
2592  	};
2593  
2594  	return new_term(term, &temp, str, /*num=*/0);
2595  }
2596  
2597  int parse_events_term__term(struct parse_events_term **term,
2598  			    enum parse_events__term_type term_lhs,
2599  			    enum parse_events__term_type term_rhs,
2600  			    void *loc_term, void *loc_val)
2601  {
2602  	return parse_events_term__str(term, term_lhs, NULL,
2603  				      strdup(config_term_name(term_rhs)),
2604  				      loc_term, loc_val);
2605  }
2606  
2607  int parse_events_term__clone(struct parse_events_term **new,
2608  			     const struct parse_events_term *term)
2609  {
2610  	char *str;
2611  	struct parse_events_term temp = *term;
2612  
2613  	temp.used = false;
2614  	if (term->config) {
2615  		temp.config = strdup(term->config);
2616  		if (!temp.config)
2617  			return -ENOMEM;
2618  	}
2619  	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2620  		return new_term(new, &temp, /*str=*/NULL, term->val.num);
2621  
2622  	str = strdup(term->val.str);
2623  	if (!str) {
2624  		zfree(&temp.config);
2625  		return -ENOMEM;
2626  	}
2627  	return new_term(new, &temp, str, /*num=*/0);
2628  }
2629  
2630  void parse_events_term__delete(struct parse_events_term *term)
2631  {
2632  	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
2633  		zfree(&term->val.str);
2634  
2635  	zfree(&term->config);
2636  	free(term);
2637  }
2638  
2639  static int parse_events_terms__copy(const struct parse_events_terms *src,
2640  				    struct parse_events_terms *dest)
2641  {
2642  	struct parse_events_term *term;
2643  
2644  	list_for_each_entry (term, &src->terms, list) {
2645  		struct parse_events_term *n;
2646  		int ret;
2647  
2648  		ret = parse_events_term__clone(&n, term);
2649  		if (ret)
2650  			return ret;
2651  
2652  		list_add_tail(&n->list, &dest->terms);
2653  	}
2654  	return 0;
2655  }
2656  
2657  void parse_events_terms__init(struct parse_events_terms *terms)
2658  {
2659  	INIT_LIST_HEAD(&terms->terms);
2660  }
2661  
2662  void parse_events_terms__exit(struct parse_events_terms *terms)
2663  {
2664  	struct parse_events_term *term, *h;
2665  
2666  	list_for_each_entry_safe(term, h, &terms->terms, list) {
2667  		list_del_init(&term->list);
2668  		parse_events_term__delete(term);
2669  	}
2670  }
2671  
2672  void parse_events_terms__delete(struct parse_events_terms *terms)
2673  {
2674  	if (!terms)
2675  		return;
2676  	parse_events_terms__exit(terms);
2677  	free(terms);
2678  }
2679  
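/*
 * Illustrative example: a term list built from "period=1000,freeze"
 * (where "freeze" is a term given without a value) is rendered back as
 * "period=0x3e8,freeze"; numeric values are printed in hex and
 * no_value terms print only their name.
 */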
2680  int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
2681  {
2682  	struct parse_events_term *term;
2683  	bool first = true;
2684  
2685  	if (!terms)
2686  		return 0;
2687  
2688  	list_for_each_entry(term, &terms->terms, list) {
2689  		int ret;
2690  
2691  		if (!first) {
2692  			ret = strbuf_addch(sb, ',');
2693  			if (ret < 0)
2694  				return ret;
2695  		}
2696  		first = false;
2697  
2698  		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2699  			if (term->no_value) {
2700  				assert(term->val.num == 1);
2701  				ret = strbuf_addf(sb, "%s", term->config);
2702  			} else
2703  				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
2704  		else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
2705  			if (term->config) {
2706  				ret = strbuf_addf(sb, "%s=", term->config);
2707  				if (ret < 0)
2708  					return ret;
2709  			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
2710  				ret = strbuf_addf(sb, "%s=", config_term_name(term->type_term));
2711  				if (ret < 0)
2712  					return ret;
2713  			}
2714  			assert(!term->no_value);
2715  			ret = strbuf_addf(sb, "%s", term->val.str);
2716  		}
2717  		if (ret < 0)
2718  			return ret;
2719  	}
2720  	return 0;
2721  }
2722  
2723  static void config_terms_list(char *buf, size_t buf_sz)
2724  {
2725  	int i;
2726  	bool first = true;
2727  
2728  	buf[0] = '\0';
2729  	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2730  		const char *name = config_term_name(i);
2731  
2732  		if (!config_term_avail(i, NULL))
2733  			continue;
2734  		if (!name)
2735  			continue;
2736  		if (name[0] == '<')
2737  			continue;
2738  
2739  		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2740  			return;
2741  
2742  		if (!first)
2743  			strcat(buf, ",");
2744  		else
2745  			first = false;
2746  		strcat(buf, name);
2747  	}
2748  }
2749  
2750  /*
2751   * Return a string containing the valid config terms of an event.
2752   * @additional_terms: For terms such as PMU sysfs terms.
2753   */
2754  char *parse_events_formats_error_string(char *additional_terms)
2755  {
2756  	char *str;
2757  	/* "no-overwrite" is the longest name */
2758  	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2759  			  (sizeof("no-overwrite") - 1)];
2760  
2761  	config_terms_list(static_terms, sizeof(static_terms));
2762  	/* valid terms */
2763  	if (additional_terms) {
2764  		if (asprintf(&str, "valid terms: %s,%s",
2765  			     additional_terms, static_terms) < 0)
2766  			goto fail;
2767  	} else {
2768  		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2769  			goto fail;
2770  	}
2771  	return str;
2772  
2773  fail:
2774  	return NULL;
2775  }
2776