// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

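/*
 * Attach an address space to @thread: the group leader (pid == tid, or
 * unknown pid) gets a freshly allocated struct maps, while any other
 * thread shares the maps of its process leader, which is looked up (or
 * created) via machine__findnew_thread().
 */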
int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}

	return thread__maps(thread) ? 0 : -1;
}

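/*
 * Allocate and initialise a thread, returning it with a single reference
 * held by the caller. Until a real comm is recorded, the thread carries a
 * placeholder ":<tid>" comm entry.
 */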
struct thread *thread__new(pid_t pid, pid_t tid)
{
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		struct comm *comm;
		char comm_str[32];

		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		snprintf(comm_str, sizeof(comm_str), ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	thread__delete(thread);
	return NULL;
}

static void (*thread__priv_destructor)(void *priv);

void thread__set_priv_destructor(void (*destructor)(void *priv))
{
	assert(thread__priv_destructor == NULL);

	thread__priv_destructor = destructor;
}

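/*
 * Tear down a thread once its last reference has been dropped: release the
 * maps, the namespaces and comm lists, the nsinfo reference and any LBR
 * stitch state, then invoke the optional private-data destructor before
 * freeing the structure itself.
 */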
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);

	if (thread__priv_destructor)
		thread__priv_destructor(thread__priv(thread));

	RC_CHK_FREE(thread);
}

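/* Take a new reference on @thread; paired with thread__put() below. */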
struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));

	return result;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;

	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed a few or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}

struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;

	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));

		if (exec)
			unwind__flush_access(thread__maps(thread));
	}

	thread__set_comm_set(thread, true);

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

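/*
 * Read the comm string from procfs (<pid>/task/<tid>/comm) and install it
 * on @thread; returns -1 if the path would be too long or procfs cannot be
 * read.
 */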
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));

	return str;
}

static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));

	return thread__var_comm_len(thread);
}

/* CHECKME: it would probably be better to return the max comm len from the comm list */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}

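/*
 * Add @map to the thread's address space, giving the unwinder a chance to
 * prepare its per-map state first; any overlap with existing maps is fixed
 * up on insertion.
 */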
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	return maps__fixup_overlap_and_insert(thread__maps(thread), map);
}

struct thread__prepare_access_maps_cb_args {
	int err;
	struct maps *maps;
};

static int thread__prepare_access_maps_cb(struct map *map, void *data)
{
	bool initialized = false;
	struct thread__prepare_access_maps_cb_args *args = data;

	args->err = unwind__prepare_access(args->maps, map, &initialized);

	return (args->err || initialized) ? 1 : 0;
}

static int thread__prepare_access(struct thread *thread)
{
	struct thread__prepare_access_maps_cb_args args = {
		.err = 0,
	};

	if (dwarf_callchain_users) {
		args.maps = thread__maps(thread);
		maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args);
	}

	return args.err;
}

static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is a new thread in the same process: it shares the map groups. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (maps__equal(thread__maps(thread), thread__maps(parent))) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* But this one is a new process: copy the maps. */
	return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0;
}

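/*
 * Initialise @thread from @parent on a fork: inherit the parent's comm if
 * one was ever set, record the parent tid as ppid, and share or copy the
 * parent's maps depending on whether this is a new thread or a new process.
 */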
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

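/*
 * Resolve @addr when the cpumode is not known: try user, kernel, guest user
 * and guest kernel in turn, stopping at the first map that matches.
 */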
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

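/*
 * Return a reference to the main (group leader) thread of @thread's process,
 * or NULL if the pid is unknown or the leader cannot be found.
 */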
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

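/*
 * Copy @len bytes starting at virtual address @ip into @buf by reading from
 * the DSO backing the map that contains @ip, optionally reporting whether
 * that DSO is 64-bit. Returns -1 if the address cannot be resolved,
 * otherwise the result of dso__data_read_offset().
 */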
int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso__data(dso)->status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso__is_64_bit(dso);

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

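/*
 * Release all LBR stitching state hanging off @thread: the active and free
 * stitch lists, the cached previous LBR cursor entries and the lbr_stitch
 * container itself.
 */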
void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		map_symbol__exit(&pos->cursor.ms);
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	for (unsigned int i = 0; i < lbr_stitch->prev_lbr_cursor_size; i++)
		map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}