1  // SPDX-License-Identifier: GPL-2.0
2  #include <asm/bug.h>
3  #include <linux/kernel.h>
4  #include <linux/string.h>
5  #include <linux/zalloc.h>
6  #include <sys/time.h>
7  #include <sys/resource.h>
8  #include <sys/types.h>
9  #include <sys/stat.h>
10  #include <unistd.h>
11  #include <errno.h>
12  #include <fcntl.h>
13  #include <stdlib.h>
14  #ifdef HAVE_LIBBPF_SUPPORT
15  #include <bpf/libbpf.h>
16  #include "bpf-event.h"
17  #include "bpf-utils.h"
18  #endif
19  #include "compress.h"
20  #include "env.h"
21  #include "namespaces.h"
22  #include "path.h"
23  #include "map.h"
24  #include "symbol.h"
25  #include "srcline.h"
26  #include "dso.h"
27  #include "dsos.h"
28  #include "machine.h"
29  #include "auxtrace.h"
30  #include "util.h" /* O_CLOEXEC for older systems */
31  #include "debug.h"
32  #include "string2.h"
33  #include "vdso.h"
34  #include "annotate-data.h"
35  
/*
 * Candidate locations for a .gnu_debuglink target, tried in order by
 * dso__read_binary_type_filename(). Each entry is a format string taking
 * (directory-of-dso, debuglink-name); the "%.0s" in the first entry
 * consumes the directory argument while printing nothing, so the bare
 * debuglink name is tried first.
 */
static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};
42  
dso__set_nsinfo(struct dso * dso,struct nsinfo * nsi)43  void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi)
44  {
45  	nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo);
46  	RC_CHK_ACCESS(dso)->nsinfo = nsi;
47  }
48  
dso__symtab_origin(const struct dso * dso)49  char dso__symtab_origin(const struct dso *dso)
50  {
51  	static const char origin[] = {
52  		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
53  		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
54  		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
55  		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
56  		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
57  		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
58  		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
59  		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
60  		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
61  		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
62  		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
63  		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
64  		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
65  		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
66  		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
67  		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
68  		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
69  		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
70  	};
71  
72  	if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
73  		return '!';
74  	return origin[dso__symtab_type(dso)];
75  }
76  
/*
 * Return true when @dso's binary type is expected to be backed by an
 * object file on disk; false for purely synthetic sources (kallsyms,
 * JIT maps, BPF program images, out-of-line code).
 */
bool dso__is_object_file(const struct dso *dso)
{
	switch (dso__binary_type(dso)) {
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
		return false;
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return true;
	}
}
109  
/*
 * Build into @filename (at most @size bytes) the on-disk path where a
 * DSO of the given binary @type should be looked up. @root_dir is the
 * machine's root directory, used only for guest kernel modules.
 * Returns 0 on success, -1 when no path can be formed for @type.
 */
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		/* Start from the dso's long name under the symfs prefix. */
		len = __symbol__join_symfs(filename, size, dso__long_name(dso));
		/* Scan backwards for the final '/' to split off the directory. */
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		/* Directory part only; strncpy does not terminate, so do it. */
		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash-filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		/* Extract the .gnu_debuglink target name into symfile. */
		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
					debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib
		 */
		if (strlen(dso__long_name(dso)) < 9 ||
		    strncmp(dso__long_name(dso), "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		/* "+ 4" skips the "/usr" prefix, mapping /usr/lib/X to /lib/X. */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		/* Find the final '/' of the long name. */
		last_slash = dso__long_name(dso) + dso__long_name_len(dso);
		while (last_slash != dso__long_name(dso) && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		/* +2: include the trailing '/' and room for the NUL. */
		dir_size = last_slash - dso__long_name(dso) + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		/* Result: <dir>/.debug/<basename>. */
		len += scnprintf(filename + len, dir_size, "%s",  dso__long_name(dso));
		len += scnprintf(filename + len , size - len, ".debug%s",
								last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso__has_build_id(dso)) {
			ret = -1;
			break;
		}

		/* Layout: .build-id/<first two hex chars>/<rest>.debug */
		build_id__sprintf(dso__bid_const(dso), build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		/* Guest modules live under the guest machine's root dir. */
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso__long_name(dso));
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		/* These types have no backing file to name. */
		ret = -1;
		break;
	}

	return ret;
}
260  
/* Compression id 0 means "not compressed". */
enum {
	COMP_ID__NONE = 0,
};

/*
 * Supported kmod compression formats, indexed by compression id:
 * file extension, decompressor, and a probe that checks whether a file
 * really is in that format. Which entries exist beyond COMP_ID__NONE
 * depends on build-time library support; the table is NULL-terminated.
 */
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
	bool (*is_compressed)(const char *input);
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};
279  
is_supported_compression(const char * ext)280  static int is_supported_compression(const char *ext)
281  {
282  	unsigned i;
283  
284  	for (i = 1; compressions[i].fmt; i++) {
285  		if (!strcmp(ext, compressions[i].fmt))
286  			return i;
287  	}
288  	return COMP_ID__NONE;
289  }
290  
/*
 * Decide whether @pathname refers to a kernel module for the given
 * @cpumode. User, hypervisor and guest-user samples are never modules;
 * for the rest (including CPUMODE_UNKNOWN) the path itself is parsed.
 * If parsing fails we conservatively report "is a module".
 */
bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	/* Callers are expected to pass an already-masked cpumode. */
	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
					pathname);
			return true;
		}
	}

	return m.kmod;
}
316  
dso__needs_decompress(struct dso * dso)317  bool dso__needs_decompress(struct dso *dso)
318  {
319  	return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
320  		dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
321  }
322  
/*
 * Decompress @name using compression id @comp into a temporary file and
 * return an fd for it. If @pathname is non-NULL and decompression
 * succeeds, the temp file's path is copied into it (up to @len bytes);
 * otherwise the temp file is unlinked (the returned fd keeps it alive).
 * On failure returns -1 with *err set to errno or a DSO_LOAD_ERRNO code.
 */
int filename__decompress(const char *name, char *pathname,
			 size_t len, int comp, int *err)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	/*
	 * We have proper compression id for DSO and yet the file
	 * behind the 'name' can still be plain uncompressed object.
	 *
	 * The reason is behind the logic we open the DSO object files,
	 * when we try all possible 'debug' objects until we find the
	 * data. So even if the DSO is represented by 'krava.xz' module,
	 * we can end up here opening ~/.debug/....23432432/debug' file
	 * which is not compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[comp].is_compressed(name))
		return open(name, O_RDONLY);

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		*err = errno;
		return -1;
	}

	if (compressions[comp].decompress(name, fd)) {
		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	/* Without a pathname to report, the fd is the only handle needed. */
	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}
365  
/*
 * Decompress @dso's module file @name. Only compressed kernel modules
 * with a known compression id qualify; anything else yields -1.
 */
static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	if (!dso__needs_decompress(dso) || dso__comp(dso) == COMP_ID__NONE)
		return -1;

	return filename__decompress(name, pathname, len, dso__comp(dso),
				    dso__load_errno(dso));
}
377  
dso__decompress_kmodule_fd(struct dso * dso,const char * name)378  int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
379  {
380  	return decompress_kmodule(dso, name, NULL, 0);
381  }
382  
/*
 * Decompress @dso's module @name to a temp file, returning its path in
 * @pathname (up to @len bytes). Returns 0 on success, -1 on failure.
 * The fd is closed here; callers work with the path only.
 */
int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	if (fd < 0)
		return -1;

	/* Only close a valid descriptor; close(-1) fails and sets errno. */
	close(fd);
	return 0;
}
391  
/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - compression id from is_supported_compression() when @path
 *            has a supported compression suffix, COMP_ID__NONE otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
 *            base name of @path
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	/* Work on the basename only. */
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		/* Step back over the ".ko" expected before e.g. ".gz"/".xz". */
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			/* Module: bracketed name without the suffixes. */
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		/* Module names use '_' where the file name may have '-'. */
		strreplace(m->name, '-', '_');
	}

	return 0;
}
471  
/*
 * Mark @dso as a kernel module, deriving its symtab type, compression
 * id and short name from the parsed kmod path @m.
 */
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE);
	else
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE);

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
		dso__set_comp(dso, m->comp);
	}

	dso__set_is_kmod(dso);
	/* NOTE(review): strdup() result is not checked for NULL here. */
	dso__set_short_name(dso, strdup(m->name), true);
}
489  
/*
 * Global list of open DSOs and the counter.
 * The list is kept in open order (oldest first), so close_first_dso()
 * evicts the longest-open descriptor. Both are serialized by
 * dso__data_open_lock.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
496  
/* Append @dso to the global open-fd list and bump the counter. */
static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso__data(dso)->open_entry, &dso__data_open);
#ifdef REFCNT_CHECKING
	dso__data(dso)->dso = dso__get(dso);
#endif
	/* Assume the dso is part of dsos, hence the optional reference count above. */
	assert(dso__dsos(dso));
	dso__data_open_cnt++;
}
507  
/* Remove @dso from the global open-fd list and decrement the counter. */
static void dso__list_del(struct dso *dso)
{
	list_del_init(&dso__data(dso)->open_entry);
#ifdef REFCNT_CHECKING
	/* Drop the reference taken by dso__list_add(). */
	dso__put(dso__data(dso)->dso);
#endif
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}
518  
519  static void close_first_dso(void);
520  
do_open(char * name)521  static int do_open(char *name)
522  {
523  	int fd;
524  	char sbuf[STRERR_BUFSIZE];
525  
526  	do {
527  		fd = open(name, O_RDONLY|O_CLOEXEC);
528  		if (fd >= 0)
529  			return fd;
530  
531  		pr_debug("dso open failed: %s\n",
532  			 str_error_r(errno, sbuf, sizeof(sbuf)));
533  		if (!dso__data_open_cnt || errno != EMFILE)
534  			break;
535  
536  		close_first_dso();
537  	} while (1);
538  
539  	return -1;
540  }
541  
/*
 * Rebuild @filename as reachable through @dso's mount namespace, keyed
 * by the nsinfo pid (presumably via /proc/<pid>/root — see
 * filename_with_chroot()). Returns a newly allocated string or NULL.
 */
char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
{
	return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename);
}
546  
/*
 * Resolve @dso's path for its current binary type and open it read-only.
 * Falls back to the path seen through the DSO's mount namespace when the
 * plain path is missing, and transparently decompresses compressed
 * kernel modules to a temporary file first. Returns an open fd or a
 * negative errno-style value.
 */
static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);
	bool decomp = false;

	if (!name)
		return -ENOMEM;

	mutex_lock(dso__lock(dso));
	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso__binary_type(dso),
					    root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name)) {
		char *new_name;

		/* Only retry via the mount namespace on a plain ENOENT. */
		if (errno != ENOENT || dso__nsinfo(dso) == NULL)
			goto out;

		new_name = dso__filename_with_chroot(dso, name);
		if (!new_name)
			goto out;

		free(name);
		name = new_name;
	}

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -(*dso__load_errno(dso));
			goto out;
		}

		decomp = true;
		strcpy(name, newpath);
	}

	fd = do_open(name);

	/* The decompressed copy is temporary; unlink it once it's open. */
	if (decomp)
		unlink(name);

out:
	mutex_unlock(dso__lock(dso));
	free(name);
	return fd;
}
602  
603  static void check_data_close(void);
604  
/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	/*
	 * Build-id cache files live in the host filesystem, so the DSO's
	 * mount namespace is entered only for the other binary types.
	 */
	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
		mutex_lock(dso__lock(dso));
		nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
		mutex_unlock(dso__lock(dso));
	}
	fd = __open_dso(dso, machine);
	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
637  
close_data_fd(struct dso * dso)638  static void close_data_fd(struct dso *dso)
639  {
640  	if (dso__data(dso)->fd >= 0) {
641  		close(dso__data(dso)->fd);
642  		dso__data(dso)->fd = -1;
643  		dso__data(dso)->file_size = 0;
644  		dso__list_del(dso);
645  	}
646  }
647  
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}
659  
/* Close the oldest open DSO data fd to make room for a new one. */
static void close_first_dso(void)
{
	struct dso_data *dso_data;
	struct dso *dso;

	dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry);
#ifdef REFCNT_CHECKING
	/* With refcount checking the dso pointer is stored explicitly. */
	dso = dso_data->dso;
#else
	dso = container_of(dso_data, struct dso, data);
#endif
	close_dso(dso);
}
673  
/*
 * Compute how many DSO data fds may be kept cached: half the soft
 * RLIMIT_NOFILE, RLIM_INFINITY stays unlimited, and 1 on getrlimit()
 * failure.
 */
static rlim_t get_fd_limit(void)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l) != 0) {
		pr_err("failed to get fd limit\n");
		return 1;
	}

	/* Allow half of the current open fd limit. */
	return l.rlim_cur == RLIM_INFINITY ? l.rlim_cur : l.rlim_cur / 2;
}
692  
/* Cached fd-cache limit from get_fd_limit(); 0 means "not computed yet". */
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}
704  
may_cache_fd(void)705  static bool may_cache_fd(void)
706  {
707  	if (!fd_limit)
708  		fd_limit = get_fd_limit();
709  
710  	if (fd_limit == RLIM_INFINITY)
711  		return true;
712  
713  	return fd_limit > (rlim_t) dso__data_open_cnt;
714  }
715  
/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	if (!may_cache_fd())
		close_first_dso();
}
728  
/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 * Takes the global open-fd lock to serialize with concurrent opens.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}
741  
/*
 * Ensure dso__data(dso)->fd is open. When no binary type is established
 * yet, probe the build-id cache and then the system path, stopping at
 * the first that opens. Sets dso_data->status accordingly. All callers
 * in this file hold dso__data_open_lock when calling this.
 */
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;
	struct dso_data *dso_data = dso__data(dso);

	/* Already open: nothing to do. */
	if (dso_data->fd >= 0)
		return;

	if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) {
		dso_data->fd = open_dso(dso, machine);
		goto out;
	}

	/* Probe candidates in order; NOT_FOUND terminates the loop. */
	do {
		dso__set_binary_type(dso, binary_type_data[i++]);

		dso_data->fd = open_dso(dso, machine);
		if (dso_data->fd >= 0)
			goto out;

	} while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso_data->fd >= 0)
		dso_data->status = DSO_DATA_STATUS_OK;
	else
		dso_data->status = DSO_DATA_STATUS_ERROR;
}
774  
775  /**
776   * dso__data_get_fd - Get dso's data file descriptor
777   * @dso: dso object
778   * @machine: machine object
779   *
780   * External interface to find dso's file, open it and
781   * returns file descriptor.  It should be paired with
782   * dso__data_put_fd() if it returns non-negative value.
783   */
dso__data_get_fd(struct dso * dso,struct machine * machine)784  int dso__data_get_fd(struct dso *dso, struct machine *machine)
785  {
786  	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
787  		return -1;
788  
789  	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
790  		return -1;
791  
792  	try_to_open_dso(dso, machine);
793  
794  	if (dso__data(dso)->fd < 0)
795  		pthread_mutex_unlock(&dso__data_open_lock);
796  
797  	return dso__data(dso)->fd;
798  }
799  
/* Release the lock acquired by a successful dso__data_get_fd(). */
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
804  
dso__data_status_seen(struct dso * dso,enum dso_data_status_seen by)805  bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
806  {
807  	u32 flag = 1 << by;
808  
809  	if (dso__data(dso)->status_seen & flag)
810  		return true;
811  
812  	dso__data(dso)->status_seen |= flag;
813  
814  	return false;
815  }
816  
817  #ifdef HAVE_LIBBPF_SUPPORT
/*
 * Copy up to DSO__DATA_CACHE_SIZE bytes of a BPF program's jited code,
 * starting at @offset, into @data. Returns the number of bytes copied,
 * or -1 (marking the dso data status as error when the prog info is
 * missing) if the info is unavailable or @offset is past the end.
 */
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
	if (!node || !node->info_linear) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	/* Clamp to what remains past @offset. */
	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}
842  
/*
 * Set the dso's file_size to the BPF program's jited code length.
 * Returns 0 on success, -1 (with data status set to error) when the
 * prog info cannot be found.
 */
static int bpf_size(struct dso *dso)
{
	struct bpf_prog_info_node *node;
	struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);

	node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
	if (!node || !node->info_linear) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	dso__data(dso)->file_size = node->info_linear->info.jited_prog_len;
	return 0;
}
857  #endif // HAVE_LIBBPF_SUPPORT
858  
/* Free every cached data chunk of @dso, emptying its rb-tree. */
static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node *next = rb_first(root);

	mutex_lock(dso__lock(dso));
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		/* Advance before erasing/freeing the current node. */
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	mutex_unlock(dso__lock(dso));
}
876  
/*
 * Look up the cache chunk whose [offset, offset + DSO__DATA_CACHE_SIZE)
 * range contains @offset. Returns NULL when no chunk covers it.
 * Lockless: callers rely on dso_cache__insert() for racy inserts.
 */
static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_node *node = dso__data(dso)->cache.rb_node;

	while (node != NULL) {
		struct dso_cache *cache =
			rb_entry(node, struct dso_cache, rb_node);
		u64 end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			node = node->rb_left;
		else if (offset >= end)
			node = node->rb_right;
		else
			return cache;
	}

	return NULL;
}
901  
/*
 * Insert @new into @dso's cache tree under the dso lock. Returns NULL
 * when @new was inserted, or the already-present overlapping entry when
 * another thread won the race (the caller then keeps ownership of @new).
 */
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	mutex_lock(dso__lock(dso));
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			/* Overlapping entry already present: report it. */
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	/* NULL signals successful insertion. */
	cache = NULL;
out:
	mutex_unlock(dso__lock(dso));
	return cache;
}
935  
dso_cache__memcpy(struct dso_cache * cache,u64 offset,u8 * data,u64 size,bool out)936  static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
937  				 u64 size, bool out)
938  {
939  	u64 cache_offset = offset - cache->offset;
940  	u64 cache_size   = min(cache->size - cache_offset, size);
941  
942  	if (out)
943  		memcpy(data, cache->data + cache_offset, cache_size);
944  	else
945  		memcpy(cache->data + cache_offset, data, cache_size);
946  	return cache_size;
947  }
948  
/*
 * Read one DSO__DATA_CACHE_SIZE chunk at @offset from @dso's file into
 * @data, (re)opening the fd under the global lock first. Returns bytes
 * read (possibly short), or a negative errno on failure.
 */
static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso__data(dso)->fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso__data(dso)->fd < 0) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
973  
/*
 * Allocate a new cache chunk covering @offset (aligned down with
 * DSO__DATA_CACHE_MASK), fill it from the matching backend (BPF image,
 * OOL, or the backing file) and insert it into the tree. Returns the
 * chunk in use — ours, or the pre-existing one if an insert race was
 * lost — or NULL with *ret set to 0 (EOF) or a negative error.
 */
static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	/* Chunk data is stored inline after the struct; zalloc zeroes it. */
	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else
#endif
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL)
		/* Out-of-line code: serve the zero-filled chunk as-is. */
		*ret = DSO__DATA_CACHE_SIZE;
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size   = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}
1014  
/*
 * Return the cache chunk covering @offset, populating it from the
 * backing data on a miss. On failure returns NULL with *ret set by
 * dso_cache__populate().
 */
static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *cache = __dso_cache__find(dso, offset);

	if (cache)
		return cache;

	return dso_cache__populate(dso, machine, offset, ret);
}
1024  
/*
 * Perform one chunk's worth of cached I/O at @offset. Returns bytes
 * moved, 0 at EOF, or a negative error when the chunk could not be
 * populated.
 */
static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t ret = 0;
	struct dso_cache *cache = dso_cache__find(dso, machine, offset, &ret);

	if (!cache)
		return ret;

	return dso_cache__memcpy(cache, offset, data, size, out);
}
1037  
1038  /*
1039   * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
1040   * in the rb_tree. Any read to already cached data is served
1041   * by cached data. Writes update the cache only, not the backing file.
1042   */
cached_io(struct dso * dso,struct machine * machine,u64 offset,u8 * data,ssize_t size,bool out)1043  static ssize_t cached_io(struct dso *dso, struct machine *machine,
1044  			 u64 offset, u8 *data, ssize_t size, bool out)
1045  {
1046  	ssize_t r = 0;
1047  	u8 *p = data;
1048  
1049  	do {
1050  		ssize_t ret;
1051  
1052  		ret = dso_cache_io(dso, machine, offset, p, size, out);
1053  		if (ret < 0)
1054  			return ret;
1055  
1056  		/* Reached EOF, return what we have. */
1057  		if (!ret)
1058  			break;
1059  
1060  		BUG_ON(ret > size);
1061  
1062  		r      += ret;
1063  		p      += ret;
1064  		offset += ret;
1065  		size   -= ret;
1066  
1067  	} while (size);
1068  
1069  	return r;
1070  }
1071  
/*
 * Stat the dso's backing file and record its size in
 * dso__data(dso)->file_size.  Returns 0 on success or -errno on failure,
 * marking the dso's data status as ERROR on any failure.
 */
static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso__data(dso)->fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso__data(dso)->fd < 0) {
		/* errno was set by the failed open attempt above. */
		ret = -errno;
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso__data(dso)->fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso__data(dso)->file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
1105  
dso__data_file_size(struct dso * dso,struct machine * machine)1106  int dso__data_file_size(struct dso *dso, struct machine *machine)
1107  {
1108  	if (dso__data(dso)->file_size)
1109  		return 0;
1110  
1111  	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
1112  		return -1;
1113  #ifdef HAVE_LIBBPF_SUPPORT
1114  	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
1115  		return bpf_size(dso);
1116  #endif
1117  	return file_size(dso, machine);
1118  }
1119  
1120  /**
1121   * dso__data_size - Return dso data size
1122   * @dso: dso object
1123   * @machine: machine object
1124   *
1125   * Return: dso data size
1126   */
dso__data_size(struct dso * dso,struct machine * machine)1127  off_t dso__data_size(struct dso *dso, struct machine *machine)
1128  {
1129  	if (dso__data_file_size(dso, machine))
1130  		return -1;
1131  
1132  	/* For now just estimate dso data size is close to file size */
1133  	return dso__data(dso)->file_size;
1134  }
1135  
/*
 * Common entry for cached reads (@out == true) and cache writes
 * (@out == false): validates @offset against the file size and guards
 * against u64 wrap-around of @offset + @size before calling cached_io().
 */
static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso__data(dso)->file_size)
		return -1;

	/* Reject unsigned wrap-around of offset + size. */
	if (offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}
1152  
/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_io to get the data.
 *
 * Return: number of bytes read, or -1 on error.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	/* Fail fast if a previous open/read already failed for this dso. */
	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, true);
}
1172  
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map through which @addr is mapped to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	/* Translate the virtual address to a file offset via the map. */
	u64 offset = map__map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}
1191  
1192  /**
1193   * dso__data_write_cache_offs - Write data to dso data cache at file offset
1194   * @dso: dso object
1195   * @machine: machine object
1196   * @offset: file offset
1197   * @data: buffer to write
1198   * @size: size of the @data buffer
1199   *
1200   * Write into the dso file data cache, but do not change the file itself.
1201   */
dso__data_write_cache_offs(struct dso * dso,struct machine * machine,u64 offset,const u8 * data_in,ssize_t size)1202  ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
1203  				   u64 offset, const u8 *data_in, ssize_t size)
1204  {
1205  	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
1206  
1207  	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
1208  		return -1;
1209  
1210  	return data_read_write_offset(dso, machine, offset, data, size, false);
1211  }
1212  
/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: map through which @addr is mapped to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	/* Translate the virtual address to a file offset via the map. */
	u64 offset = map__map_ip(map, addr);

	return dso__data_write_cache_offs(dso, machine, offset, data, size);
}
1232  
/*
 * Create a map at address 0 backed by a freshly created dso named @name.
 * The map holds the only reference to the dso; returns NULL on failure.
 */
struct map *dso__new_map(const char *name)
{
	struct dso *dso = dso__new(name);
	struct map *result = NULL;

	if (dso != NULL) {
		result = map__new2(0, dso);
		/* map__new2() took its own reference; drop ours. */
		dso__put(dso);
	}

	return result;
}
1245  
/*
 * Find or create the kernel dso named @name on @machine, then stamp it
 * with @short_name and the kernel space type @dso_type.
 */
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	if (dso == NULL)
		return NULL;

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	dso__set_short_name(dso, short_name, false);
	dso__set_kernel(dso, dso_type);

	return dso;
}
1265  
/*
 * Replace the dso's long name with @name, freeing the previous name if the
 * dso owned it.  Takes the owning dsos' write lock (when present) because
 * the rename invalidates the collection's sorted order.
 */
static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
	struct dsos *dsos = dso__dsos(dso);

	if (name == NULL)
		return;

	if (dsos) {
		/*
		 * Need to avoid re-sorting the dsos breaking by non-atomically
		 * renaming the dso.
		 */
		down_write(&dsos->lock);
	}

	if (dso__long_name_allocated(dso))
		free((char *)dso__long_name(dso));

	RC_CHK_ACCESS(dso)->long_name = name;
	RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
	/* Record whether the dso now owns @name and must free it later. */
	dso__set_long_name_allocated(dso, name_allocated);

	if (dsos) {
		/* The name takes part in sorting; mark the collection unsorted. */
		dsos->sorted = false;
		up_write(&dsos->lock);
	}
}
1293  
__dso_id__cmp(const struct dso_id * a,const struct dso_id * b)1294  static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
1295  {
1296  	if (a->maj > b->maj) return -1;
1297  	if (a->maj < b->maj) return 1;
1298  
1299  	if (a->min > b->min) return -1;
1300  	if (a->min < b->min) return 1;
1301  
1302  	if (a->ino > b->ino) return -1;
1303  	if (a->ino < b->ino) return 1;
1304  
1305  	/*
1306  	 * Synthesized MMAP events have zero ino_generation, avoid comparing
1307  	 * them with MMAP events with actual ino_generation.
1308  	 *
1309  	 * I found it harmful because the mismatch resulted in a new
1310  	 * dso that did not have a build ID whereas the original dso did have a
1311  	 * build ID. The build ID was essential because the object was not found
1312  	 * otherwise. - Adrian
1313  	 */
1314  	if (a->ino_generation && b->ino_generation) {
1315  		if (a->ino_generation > b->ino_generation) return -1;
1316  		if (a->ino_generation < b->ino_generation) return 1;
1317  	}
1318  
1319  	return 0;
1320  }
1321  
dso_id__empty(const struct dso_id * id)1322  bool dso_id__empty(const struct dso_id *id)
1323  {
1324  	if (!id)
1325  		return true;
1326  
1327  	return !id->maj && !id->min && !id->ino && !id->ino_generation;
1328  }
1329  
/*
 * Overwrite the dso's id with @id.  The caller must hold the owning dsos'
 * write lock, since changing the id invalidates the collection's sort order.
 */
void __dso__inject_id(struct dso *dso, const struct dso_id *id)
{
	struct dsos *dsos = dso__dsos(dso);
	struct dso_id *dso_id = dso__id(dso);

	/* dsos write lock held by caller. */

	dso_id->maj = id->maj;
	dso_id->min = id->min;
	dso_id->ino = id->ino;
	dso_id->ino_generation = id->ino_generation;

	if (dsos)
		dsos->sorted = false;
}
1345  
/*
 * Compare two dso ids, treating an empty id as matching anything.
 */
int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
	/*
	 * The second is always dso->id, so zeroes if not set, assume passing
	 * NULL for a means a zeroed id
	 */
	if (!dso_id__empty(a) && !dso_id__empty(b))
		return __dso_id__cmp(a, b);

	return 0;
}
1357  
/* Full three-way id comparison of two dsos (no empty-id special casing). */
int dso__cmp_id(struct dso *a, struct dso *b)
{
	return __dso_id__cmp(dso__id(a), dso__id(b));
}
1362  
/*
 * Public wrapper around dso__set_long_name_id().  @name_allocated tells the
 * dso whether it takes ownership of @name and must free it on replacement.
 */
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, name_allocated);
}
1367  
/*
 * Replace the dso's short name with @name, freeing the previous name if
 * the dso owned it.  Mirrors dso__set_long_name_id(): takes the owning
 * dsos' write lock (when present) as the rename invalidates sort order.
 */
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct dsos *dsos = dso__dsos(dso);

	if (name == NULL)
		return;

	if (dsos) {
		/*
		 * Need to avoid re-sorting the dsos breaking by non-atomically
		 * renaming the dso.
		 */
		down_write(&dsos->lock);
	}
	if (dso__short_name_allocated(dso))
		free((char *)dso__short_name(dso));

	RC_CHK_ACCESS(dso)->short_name		  = name;
	RC_CHK_ACCESS(dso)->short_name_len	  = strlen(name);
	/* Record whether the dso now owns @name and must free it later. */
	dso__set_short_name_allocated(dso, name_allocated);

	if (dsos) {
		/* The name takes part in sorting; mark the collection unsorted. */
		dsos->sorted = false;
		up_write(&dsos->lock);
	}
}
1394  
dso__name_len(const struct dso * dso)1395  int dso__name_len(const struct dso *dso)
1396  {
1397  	if (!dso)
1398  		return strlen("[unknown]");
1399  	if (verbose > 0)
1400  		return dso__long_name_len(dso);
1401  
1402  	return dso__short_name_len(dso);
1403  }
1404  
/* Whether symbols have been loaded for this dso. */
bool dso__loaded(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->loaded;
}
1409  
/* Whether the dso's symbol-names table has been sorted. */
bool dso__sorted_by_name(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->sorted_by_name;
}
1414  
/* Mark the dso's symbol-names table as sorted. */
void dso__set_sorted_by_name(struct dso *dso)
{
	RC_CHK_ACCESS(dso)->sorted_by_name = true;
}
1419  
/*
 * Allocate and initialize a new dso named @name with optional id @id.
 * The name is stored inline at the tail of the allocation.  Returns a
 * reference-counted dso with refcnt 1, or NULL on allocation failure.
 */
struct dso *dso__new_id(const char *name, const struct dso_id *id)
{
	/* One allocation holds the struct plus the inline copy of @name. */
	RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1);
	struct dso *res;
	struct dso_data *data;

	if (!dso)
		return NULL;

	if (ADD_RC_CHK(res, dso)) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		/* Long and short name both start out aliasing the inline copy. */
		dso__set_long_name_id(res, dso->name, false);
		dso__set_short_name(res, dso->name, false);
		dso->symbols = RB_ROOT_CACHED;
		dso->symbol_names = NULL;
		dso->symbol_names_len = 0;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data_types = RB_ROOT;
		dso->global_vars = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_SPACE__USER;
		dso->is_kmod = 0;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		mutex_init(&dso->lock);
		refcount_set(&dso->refcnt, 1);
		data = &dso->data;
		data->cache = RB_ROOT;
		data->fd = -1;
		data->status = DSO_DATA_STATUS_UNKNOWN;
		INIT_LIST_HEAD(&data->open_entry);
#ifdef REFCNT_CHECKING
		data->dso = NULL; /* Set when on the open_entry list. */
#endif
	}
	return res;
}
1470  
/* Allocate a new dso named @name with no id.  See dso__new_id(). */
struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}
1475  
/*
 * Free a dso and everything it owns.  Normally reached via dso__put() when
 * the last reference is dropped; the dso must already be out of any dsos
 * collection.
 */
void dso__delete(struct dso *dso)
{
	if (dso__dsos(dso))
		pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso));

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes);
	srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines);
	symbols__delete(&RC_CHK_ACCESS(dso)->symbols);
	RC_CHK_ACCESS(dso)->symbol_names_len = 0;
	zfree(&RC_CHK_ACCESS(dso)->symbol_names);
	annotated_data_type__tree_delete(dso__data_types(dso));
	global_var_type__tree_delete(dso__global_vars(dso));

	/* Only free the names the dso actually owns. */
	if (RC_CHK_ACCESS(dso)->short_name_allocated) {
		zfree((char **)&RC_CHK_ACCESS(dso)->short_name);
		RC_CHK_ACCESS(dso)->short_name_allocated = false;
	}

	if (RC_CHK_ACCESS(dso)->long_name_allocated) {
		zfree((char **)&RC_CHK_ACCESS(dso)->long_name);
		RC_CHK_ACCESS(dso)->long_name_allocated = false;
	}

	/* Close the data fd and release caches before tearing down the lock. */
	dso__data_close(dso);
	auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	dso__free_symsrc_filename(dso);
	nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo);
	mutex_destroy(dso__lock(dso));
	RC_CHK_FREE(dso);
}
1509  
/*
 * Take a new reference on @dso.  Returns the (possibly wrapped) dso to
 * use, or the pass-through value from RC_CHK_GET when no refcount is
 * taken (e.g. @dso is NULL).
 */
struct dso *dso__get(struct dso *dso)
{
	struct dso *result;

	if (RC_CHK_GET(result, dso))
		refcount_inc(&RC_CHK_ACCESS(dso)->refcnt);

	return result;
}
1519  
/* Drop a reference; deletes the dso when the last reference goes away. */
void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt))
		dso__delete(dso);
	else
		/* Release only the refcount-checking wrapper, not the dso. */
		RC_CHK_PUT(dso);
}
1527  
/* Copy @bid into the dso and mark the build id as present. */
void dso__set_build_id(struct dso *dso, struct build_id *bid)
{
	RC_CHK_ACCESS(dso)->bid = *bid;
	RC_CHK_ACCESS(dso)->has_build_id = 1;
}
1533  
dso__build_id_equal(const struct dso * dso,struct build_id * bid)1534  bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
1535  {
1536  	const struct build_id *dso_bid = dso__bid_const(dso);
1537  
1538  	if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) {
1539  		/*
1540  		 * For the backward compatibility, it allows a build-id has
1541  		 * trailing zeros.
1542  		 */
1543  		return !memcmp(dso_bid->data, bid->data, bid->size) &&
1544  			!memchr_inv(&dso_bid->data[bid->size], 0,
1545  				    dso_bid->size - bid->size);
1546  	}
1547  
1548  	return dso_bid->size == bid->size &&
1549  	       memcmp(dso_bid->data, bid->data, dso_bid->size) == 0;
1550  }
1551  
dso__read_running_kernel_build_id(struct dso * dso,struct machine * machine)1552  void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1553  {
1554  	char path[PATH_MAX];
1555  
1556  	if (machine__is_default_guest(machine))
1557  		return;
1558  	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1559  	if (sysfs__read_build_id(path, dso__bid(dso)) == 0)
1560  		dso__set_has_build_id(dso);
1561  }
1562  
dso__kernel_module_get_build_id(struct dso * dso,const char * root_dir)1563  int dso__kernel_module_get_build_id(struct dso *dso,
1564  				    const char *root_dir)
1565  {
1566  	char filename[PATH_MAX];
1567  	/*
1568  	 * kernel module short names are of the form "[module]" and
1569  	 * we need just "module" here.
1570  	 */
1571  	const char *name = dso__short_name(dso) + 1;
1572  
1573  	snprintf(filename, sizeof(filename),
1574  		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1575  		 root_dir, (int)strlen(name) - 1, name);
1576  
1577  	if (sysfs__read_build_id(filename, dso__bid(dso)) == 0)
1578  		dso__set_has_build_id(dso);
1579  
1580  	return 0;
1581  }
1582  
/* Print the dso's build id as a hex string to @fp; returns chars written. */
static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(dso__bid(dso), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}
1590  
dso__fprintf(struct dso * dso,FILE * fp)1591  size_t dso__fprintf(struct dso *dso, FILE *fp)
1592  {
1593  	struct rb_node *nd;
1594  	size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso));
1595  
1596  	if (dso__short_name(dso) != dso__long_name(dso))
1597  		ret += fprintf(fp, "%s, ", dso__long_name(dso));
1598  	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1599  	ret += dso__fprintf_buildid(dso, fp);
1600  	ret += fprintf(fp, ")\n");
1601  	for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) {
1602  		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1603  		ret += symbol__fprintf(pos, fp);
1604  	}
1605  
1606  	return ret;
1607  }
1608  
dso__type(struct dso * dso,struct machine * machine)1609  enum dso_type dso__type(struct dso *dso, struct machine *machine)
1610  {
1611  	int fd;
1612  	enum dso_type type = DSO__TYPE_UNKNOWN;
1613  
1614  	fd = dso__data_get_fd(dso, machine);
1615  	if (fd >= 0) {
1616  		type = dso__type_fd(fd);
1617  		dso__data_put_fd(dso);
1618  	}
1619  
1620  	return type;
1621  }
1622  
/*
 * Format a human-readable message for the dso's load errno into @buf.
 * Non-negative values are treated as regular errno codes; values in the
 * __DSO_LOAD_ERRNO__ range index the table below.  Returns 0 on success,
 * -1 if the value falls outside the known range.
 */
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = *dso__load_errno(dso);
	/*
	 * This must have a same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
	"Internal tools/perf/ library error",
	"Invalid ELF file",
	"Can not read build id",
	"Mismatching build id",
	"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		/* str_error_r() may return a static string; copy it into @buf. */
		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}
1655  
/*
 * If @dso_name looks like a "/tmp/perf-<pid>.map" JIT map file, store the
 * parsed pid in *tid and return true.
 */
bool perf_pid_map_tid(const char *dso_name, int *tid)
{
	int matched = sscanf(dso_name, "/tmp/perf-%d.map", tid);

	return matched == 1;
}
1660  
/* Whether @dso_name is a "/tmp/perf-<pid>.map" JIT map file name. */
bool is_perf_pid_map_name(const char *dso_name)
{
	int unused_tid;

	return perf_pid_map_tid(dso_name, &unused_tid);
}
1667