1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Infrastructure to hook into function calls and returns.
4   * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5   * Mostly borrowed from function tracer which
6   * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7   *
8   * Highly modified by Steven Rostedt (VMware).
9   */
10  #include <linux/bits.h>
11  #include <linux/jump_label.h>
12  #include <linux/suspend.h>
13  #include <linux/ftrace.h>
14  #include <linux/static_call.h>
15  #include <linux/slab.h>
16  
17  #include <trace/events/sched.h>
18  
19  #include "ftrace_internal.h"
20  #include "trace.h"
21  
22  /*
23   * FGRAPH_FRAME_SIZE:	Size in bytes of the meta data on the shadow stack
24   * FGRAPH_FRAME_OFFSET:	Size in long words of the meta data frame
25   */
26  #define FGRAPH_FRAME_SIZE	sizeof(struct ftrace_ret_stack)
27  #define FGRAPH_FRAME_OFFSET	DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))
28  
29  /*
30   * On entry to a function (via function_graph_enter()), a new fgraph frame
31   * (ftrace_ret_stack) is pushed onto the stack as well as a word that
32   * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
33   *
34   * bits:  0 -  9	offset in words from the previous ftrace_ret_stack
35   *
36   * bits: 10 - 11	Type of storage
37   *			  0 - reserved
38   *			  1 - bitmap of fgraph_array index
39   *			  2 - reserved data
40   *
41   * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
42   *  bits: 12 - 27	The bitmap of fgraph_ops fgraph_array index
43   *			That is, it's a bitmask of 0-15 (16 bits)
44   *			where if a corresponding ops in the fgraph_array[]
45   *			expects a callback from the return of the function,
46   *			its corresponding bit will be set.
47   *
48   *
49   * The top of the ret_stack (when not empty) will always have a reference
50   * word that points to the last fgraph frame that was saved.
51   *
52   * For reserved data:
53   *  bits: 12 - 16	The size in words that is stored
54   *  bits: 17 - 20	The index of fgraph_array, which shows whose data is stored
55   *
56   * That is, at the end of function_graph_enter, if the first and fourth
57   * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
58   * on the return of the function being traced, and the fourth fgraph_ops
59   * stored two words of data, this is what will be on the task's shadow
60   * ret_stack: (the stack grows upward)
61   *
62   *  ret_stack[SHADOW_STACK_OFFSET]
63   * | SHADOW_STACK_TASK_VARS(ret_stack)[15]      |
64   * ...
65   * | SHADOW_STACK_TASK_VARS(ret_stack)[0]       |
66   *  ret_stack[SHADOW_STACK_MAX_OFFSET]
67   * ...
68   * |                                            | <- task->curr_ret_stack
69   * +--------------------------------------------+
70   * | (3 << 17) | (1 << 12) | (2 << 10) | (FGRAPH_FRAME_OFFSET+3) |
71   * |         *or put another way*               |
72   * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \          | This is for fgraph_ops[3].
73   * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \          | The data size is 2 words.
74   * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
75   * | (offset2:FGRAPH_FRAME_OFFSET+3)            | <- the offset2 is from here
76   * +--------------------------------------------+ ( It is 4 words from the ret_stack)
77   * |            STORED DATA WORD 2              |
78   * |            STORED DATA WORD 1              |
79   * +--------------------------------------------+
80   * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
81   * |         *or put another way*               |
82   * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \  |
83   * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ |
84   * | (offset1:FGRAPH_FRAME_OFFSET)              | <- the offset1 is from here
85   * +--------------------------------------------+
86   * | struct ftrace_ret_stack                    |
87   * |   (stores the saved ret pointer)           | <- the offset points here
88   * +--------------------------------------------+
89   * |                 (X) | (N)                  | ( N words away from
90   * |                                            |   previous ret_stack)
91   * ...
92   * ret_stack[0]
93   *
94   * If a backtrace is required, and the real return pointer needs to be
95   * fetched, then the unwinder looks at the task's curr_ret_stack offset; if it
96   * is greater than zero (reserved, or right before popped), it masks
97   * the value with FGRAPH_FRAME_OFFSET_MASK to get the offset of the
98   * ftrace_ret_stack structure stored on the shadow stack.
99   */
100  
101  /*
102   * The following is for the top word on the stack:
103   *
104   *   FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
105   *   FGRAPH_TYPE (10-11) holds the type of word this is.
106   *     (RESERVED, BITMAP or DATA)
107   */
108  #define FGRAPH_FRAME_OFFSET_BITS	10
109  #define FGRAPH_FRAME_OFFSET_MASK	GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)
110  
111  #define FGRAPH_TYPE_BITS	2
112  #define FGRAPH_TYPE_MASK	GENMASK(FGRAPH_TYPE_BITS - 1, 0)
113  #define FGRAPH_TYPE_SHIFT	FGRAPH_FRAME_OFFSET_BITS
114  
115  enum {
116  	FGRAPH_TYPE_RESERVED	= 0,
117  	FGRAPH_TYPE_BITMAP	= 1,
118  	FGRAPH_TYPE_DATA	= 2,
119  };
120  
121  /*
122   * For BITMAP type:
123   *   FGRAPH_INDEX (12-27) bits hold a bitmask of the gops indexes that want their return callback called
124   */
125  #define FGRAPH_INDEX_BITS	16
126  #define FGRAPH_INDEX_MASK	GENMASK(FGRAPH_INDEX_BITS - 1, 0)
127  #define FGRAPH_INDEX_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
128  
129  /*
130   * For DATA type:
131   *  FGRAPH_DATA (12-16) bits hold the size of data (in words)
132   *  FGRAPH_DATA_INDEX (17-20) bits hold the index for which gops->idx the data is for
133   *
134   * Note:
135   *  data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
136   */
137  #define FGRAPH_DATA_BITS	5
138  #define FGRAPH_DATA_MASK	GENMASK(FGRAPH_DATA_BITS - 1, 0)
139  #define FGRAPH_DATA_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
140  #define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))
141  
142  #define FGRAPH_DATA_INDEX_BITS	4
143  #define FGRAPH_DATA_INDEX_MASK	GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
144  #define FGRAPH_DATA_INDEX_SHIFT	(FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)
145  
146  #define FGRAPH_MAX_INDEX	\
147  	((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)
148  
149  #define FGRAPH_ARRAY_SIZE	FGRAPH_INDEX_BITS
150  
151  /*
152   * SHADOW_STACK_SIZE:	The size in bytes of the entire shadow stack
153   * SHADOW_STACK_OFFSET:	The size in long words of the shadow stack
154   * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
155   */
156  #define SHADOW_STACK_SIZE	(PAGE_SIZE)
157  #define SHADOW_STACK_OFFSET	(SHADOW_STACK_SIZE / sizeof(long))
158  /* Leave a buffer at the end */
159  #define SHADOW_STACK_MAX_OFFSET				\
160  	(SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
161  
162  /* RET_STACK():		Return the frame from a given @offset from task @t */
163  #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
164  
165  /*
166   * Each fgraph_ops has a reserved unsigned long at the end (top) of the
167   * ret_stack to store task specific state.
168   */
169  #define SHADOW_STACK_TASK_VARS(ret_stack) \
170  	((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
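
/*
 * An illustrative sizing note (assuming 4K pages and 64-bit longs, not a
 * guarantee for every configuration): SHADOW_STACK_SIZE is 4096 bytes,
 * SHADOW_STACK_OFFSET is 512 words, and the FGRAPH_ARRAY_SIZE (16) per-ops
 * task variables live in ret_stack[496] through ret_stack[511]. New fgraph
 * frames are only pushed below SHADOW_STACK_MAX_OFFSET, so they can never
 * overwrite those variables.
 */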
171  
172  DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
173  int ftrace_graph_active;
174  
175  static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
176  static unsigned long fgraph_array_bitmask;
177  
178  /* LRU index table for fgraph_array */
179  static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
180  static int fgraph_lru_next;
181  static int fgraph_lru_last;
182  
183  /* Initialize fgraph_lru_table with unused index */
184  static void fgraph_lru_init(void)
185  {
186  	int i;
187  
188  	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
189  		fgraph_lru_table[i] = i;
190  }
191  
192  /* Release the used index to the LRU table */
193  static int fgraph_lru_release_index(int idx)
194  {
195  	if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
196  	    WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
197  		return -1;
198  
199  	fgraph_lru_table[fgraph_lru_last] = idx;
200  	fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;
201  
202  	clear_bit(idx, &fgraph_array_bitmask);
203  	return 0;
204  }
205  
206  /* Allocate a new index from LRU table */
207  static int fgraph_lru_alloc_index(void)
208  {
209  	int idx = fgraph_lru_table[fgraph_lru_next];
210  
211  	/* No id is available */
212  	if (idx == -1)
213  		return -1;
214  
215  	fgraph_lru_table[fgraph_lru_next] = -1;
216  	fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;
217  
218  	set_bit(idx, &fgraph_array_bitmask);
219  	return idx;
220  }
221  
222  /* Get the offset to the fgraph frame from a ret_stack value */
223  static inline int __get_offset(unsigned long val)
224  {
225  	return val & FGRAPH_FRAME_OFFSET_MASK;
226  }
227  
228  /* Get the type of word from a ret_stack value */
229  static inline int __get_type(unsigned long val)
230  {
231  	return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
232  }
233  
234  /* Get the data_index for a DATA type ret_stack word */
235  static inline int __get_data_index(unsigned long val)
236  {
237  	return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
238  }
239  
240  /* Get the data_size for a DATA type ret_stack word */
241  static inline int __get_data_size(unsigned long val)
242  {
243  	return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
244  }
245  
246  /* Get the word from the ret_stack at @offset */
247  static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
248  {
249  	return t->ret_stack[offset];
250  }
251  
252  /* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
253  static inline int get_frame_offset(struct task_struct *t, int offset)
254  {
255  	return __get_offset(t->ret_stack[offset]);
256  }
257  
258  /* For BITMAP type: get the bitmask from the @offset at ret_stack */
259  static inline unsigned long
260  get_bitmap_bits(struct task_struct *t, int offset)
261  {
262  	return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
263  }
264  
265  /* Write the bitmap to the ret_stack at @offset (encodes the bitmask, type and frame offset) */
266  static inline void
267  set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
268  {
269  	t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
270  		(FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
271  }
272  
273  /* For DATA type: get the data saved under the ret_stack word at @offset */
274  static inline void *get_data_type_data(struct task_struct *t, int offset)
275  {
276  	unsigned long val = t->ret_stack[offset];
277  
278  	if (__get_type(val) != FGRAPH_TYPE_DATA)
279  		return NULL;
280  	offset -= __get_data_size(val);
281  	return (void *)&t->ret_stack[offset];
282  }
283  
284  /* Create the ret_stack word for a DATA type */
285  static inline unsigned long make_data_type_val(int idx, int size, int offset)
286  {
287  	return (idx << FGRAPH_DATA_INDEX_SHIFT) |
288  		((size - 1) << FGRAPH_DATA_SHIFT) |
289  		(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
290  }
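
/*
 * A worked example of the encoding (matching the shadow stack diagram at
 * the top of this file): make_data_type_val(3, 2, FGRAPH_FRAME_OFFSET + 3)
 * builds the word
 *
 *	(3 << FGRAPH_DATA_INDEX_SHIFT) |		// data belongs to fgraph_ops idx 3
 *	((2 - 1) << FGRAPH_DATA_SHIFT) |		// two words of data stored
 *	(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) |
 *	(FGRAPH_FRAME_OFFSET + 3)			// words back to the ftrace_ret_stack
 */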
291  
292  /* ftrace_graph_entry set to this to tell some archs to run function graph */
293  static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops)
294  {
295  	return 0;
296  }
297  
298  /* ftrace_graph_return set to this to tell some archs to run function graph */
299  static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops)
300  {
301  }
302  
303  static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
304  {
305  	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
306  
307  	gvals[idx] = val;
308  }
309  
310  static unsigned long *
311  ret_stack_get_task_var(struct task_struct *t, int idx)
312  {
313  	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
314  
315  	return &gvals[idx];
316  }
317  
318  static void ret_stack_init_task_vars(unsigned long *ret_stack)
319  {
320  	unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);
321  
322  	memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
323  }
324  
325  /**
326   * fgraph_reserve_data - Reserve storage on the task's ret_stack
327   * @idx:	The index of fgraph_array
328   * @size_bytes: The size in bytes to reserve
329   *
330   * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
331   * task's ret_stack shadow stack, for a given fgraph_ops during
332   * the entryfunc() call. If entryfunc() returns zero, the storage
333   * is discarded. An entryfunc() can only call this once per iteration.
334   * The fgraph_ops retfunc() can retrieve this stored data with
335   * fgraph_retrieve_data().
336   *
337   * Returns: On success, a pointer to the data on the stack.
338   *   Otherwise, NULL if there's not enough space left on the
339   *   ret_stack for the data, or if fgraph_reserve_data() was called
340   *   more than once for a single entryfunc() call.
341   */
342  void *fgraph_reserve_data(int idx, int size_bytes)
343  {
344  	unsigned long val;
345  	void *data;
346  	int curr_ret_stack = current->curr_ret_stack;
347  	int data_size;
348  
349  	if (size_bytes > FGRAPH_MAX_DATA_SIZE)
350  		return NULL;
351  
352  	/* Convert the data size to number of longs. */
353  	data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);
354  
355  	val = get_fgraph_entry(current, curr_ret_stack - 1);
356  	data = &current->ret_stack[curr_ret_stack];
357  
358  	curr_ret_stack += data_size + 1;
359  	if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
360  		return NULL;
361  
362  	val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);
363  
364  	/* Set the last word to be reserved */
365  	current->ret_stack[curr_ret_stack - 1] = val;
366  
367  	/* Make sure interrupts see this */
368  	barrier();
369  	current->curr_ret_stack = curr_ret_stack;
370  	/* Again sync with interrupts, and reset reserve */
371  	current->ret_stack[curr_ret_stack - 1] = val;
372  
373  	return data;
374  }
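
/*
 * Example usage from an entryfunc() (an illustrative sketch only; my_entry
 * and the stored timestamp are hypothetical and not part of this file):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
 *	{
 *		u64 *start;
 *
 *		start = fgraph_reserve_data(gops->idx, sizeof(*start));
 *		if (!start)
 *			return 0;	// no room left; skip tracing this call
 *		*start = trace_clock_local();
 *		return 1;		// ask for the retfunc() callback
 *	}
 */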
375  
376  /**
377   * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
378   * @idx:	the index of fgraph_array (fgraph_ops::idx)
379   * @size_bytes: pointer to retrieved data size.
380   *
381   * This is to be called by a fgraph_ops retfunc(), to retrieve data that
382   * was stored by the fgraph_ops entryfunc() on the function entry.
383   * That is, this will retrieve the data that was reserved on the
384   * entry of the function that corresponds to the exit of the function
385   * that the fgraph_ops retfunc() is called on.
386   *
387   * Returns: The stored data from fgraph_reserve_data() called by the
388   *    matching entryfunc() for the retfunc() this is called from.
389   *   Or NULL if there was nothing stored.
390   */
391  void *fgraph_retrieve_data(int idx, int *size_bytes)
392  {
393  	int offset = current->curr_ret_stack - 1;
394  	unsigned long val;
395  
396  	val = get_fgraph_entry(current, offset);
397  	while (__get_type(val) == FGRAPH_TYPE_DATA) {
398  		if (__get_data_index(val) == idx)
399  			goto found;
400  		offset -= __get_data_size(val) + 1;
401  		val = get_fgraph_entry(current, offset);
402  	}
403  	return NULL;
404  found:
405  	if (size_bytes)
406  		*size_bytes = __get_data_size(val) * sizeof(long);
407  	return get_data_type_data(current, offset);
408  }
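
/*
 * Example usage from a retfunc() (an illustrative sketch continuing the
 * hypothetical my_entry() above):
 *
 *	static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
 *	{
 *		u64 *start;
 *		int size;
 *
 *		start = fgraph_retrieve_data(gops->idx, &size);
 *		if (!start)
 *			return;		// nothing was reserved on entry
 *		trace_printk("%ps took %llu ns\n", (void *)trace->func,
 *			     trace_clock_local() - *start);
 *	}
 */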
409  
410  /**
411   * fgraph_get_task_var - retrieve a task specific state variable
412   * @gops: The fgraph_ops that owns the task specific variable
413   *
414   * Every registered fgraph_ops has a task state variable
415   * reserved on the task's ret_stack. This function returns the
416   * address of that variable.
417   *
418   * Returns the address of the fgraph_ops @gops task-specific
419   * unsigned long variable.
420   */
421  unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
422  {
423  	return ret_stack_get_task_var(current, gops->idx);
424  }
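
/*
 * Example usage (an illustrative sketch; the per-task counter is
 * hypothetical). Because the variable lives on the task's own shadow
 * stack, no locking is needed to update it from the callbacks:
 *
 *	static int count_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
 *	{
 *		unsigned long *count = fgraph_get_task_var(gops);
 *
 *		(*count)++;
 *		return 0;	// no retfunc() needed for simple counting
 *	}
 */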
425  
426  /*
427   * @offset: The offset into @t->ret_stack to find the ret_stack entry
428   * @frame_offset: Where to place the offset into @t->ret_stack of that entry
429   *
430   * Returns a pointer to the previous ret_stack below @offset or NULL
431   *   when it reaches the bottom of the stack.
432   *
433   * Calling this with:
434   *
435   *   offset = task->curr_ret_stack;
436   *   do {
437   *	ret_stack = get_ret_stack(task, offset, &offset);
438   *   } while (ret_stack);
439   *
440   * Will iterate through all the ret_stack entries from curr_ret_stack
441   * down to the first one.
442   */
443  static inline struct ftrace_ret_stack *
444  get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
445  {
446  	int offs;
447  
448  	BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));
449  
450  	if (unlikely(offset <= 0))
451  		return NULL;
452  
453  	offs = get_frame_offset(t, --offset);
454  	if (WARN_ON_ONCE(offs <= 0 || offs > offset))
455  		return NULL;
456  
457  	offset -= offs;
458  
459  	*frame_offset = offset;
460  	return RET_STACK(t, offset);
461  }
462  
463  /* Enabled by default (can be cleared by function_graph tracer flags) */
464  static bool fgraph_sleep_time = true;
465  
466  #ifdef CONFIG_DYNAMIC_FTRACE
467  /*
468   * archs can override this function if they must do something
469   * to enable hook for graph tracer.
470   */
471  int __weak ftrace_enable_ftrace_graph_caller(void)
472  {
473  	return 0;
474  }
475  
476  /*
477   * archs can override this function if they must do something
478   * to disable hook for graph tracer.
479   */
480  int __weak ftrace_disable_ftrace_graph_caller(void)
481  {
482  	return 0;
483  }
484  #endif
485  
486  int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
487  			    struct fgraph_ops *gops)
488  {
489  	return 0;
490  }
491  
492  static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
493  				  struct fgraph_ops *gops)
494  {
495  }
496  
497  static struct fgraph_ops fgraph_stub = {
498  	.entryfunc = ftrace_graph_entry_stub,
499  	.retfunc = ftrace_graph_ret_stub,
500  };
501  
502  static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
503  DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
504  DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
505  static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
506  
507  /**
508   * ftrace_graph_stop - set to permanently disable function graph tracing
509   *
510   * In case of an error in function graph tracing, this is called
511   * to try to keep function graph tracing from causing any more harm.
512   * Usually this is pretty severe and this is called to try to at least
513   * get a warning out to the user.
514   */
515  void ftrace_graph_stop(void)
516  {
517  	static_branch_enable(&kill_ftrace_graph);
518  }
519  
520  /* Add a function return address to the trace stack on thread info. */
521  static int
522  ftrace_push_return_trace(unsigned long ret, unsigned long func,
523  			 unsigned long frame_pointer, unsigned long *retp,
524  			 int fgraph_idx)
525  {
526  	struct ftrace_ret_stack *ret_stack;
527  	unsigned long long calltime;
528  	unsigned long val;
529  	int offset;
530  
531  	if (unlikely(ftrace_graph_is_dead()))
532  		return -EBUSY;
533  
534  	if (!current->ret_stack)
535  		return -EBUSY;
536  
537  	BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));
538  
539  	/* Set val to "reserved" with the delta to the new fgraph frame */
540  	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
541  
542  	/*
543  	 * We must make sure the ret_stack is tested before we read
544  	 * anything else.
545  	 */
546  	smp_rmb();
547  
548  	/*
549  	 * Check if there's room on the shadow stack to fit an fgraph frame
550  	 * and a bitmap word.
551  	 */
552  	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
553  		atomic_inc(&current->trace_overrun);
554  		return -EBUSY;
555  	}
556  
557  	calltime = trace_clock_local();
558  
559  	offset = READ_ONCE(current->curr_ret_stack);
560  	ret_stack = RET_STACK(current, offset);
561  	offset += FGRAPH_FRAME_OFFSET;
562  
563  	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
564  	current->ret_stack[offset] = val;
565  	ret_stack->ret = ret;
566  	/*
567  	 * The unwinders expect curr_ret_stack to point to either zero
568  	 * or an offset where to find the next ret_stack. Even though the
569  	 * ret stack might be bogus, we want to write the ret and the
570  	 * offset to find the ret_stack before we increment the stack pointer.
571  	 * If an interrupt comes in now before we increment the curr_ret_stack
572  	 * it may blow away what we wrote. But that's fine, because the
573  	 * offset will still be correct (even though the 'ret' won't be).
574  	 * What we worry about is the offset being correct after we increment
575  	 * the curr_ret_stack and before we update that offset, as if an
576  	 * interrupt comes in and does an unwind stack dump, it will need
577  	 * at least a correct offset!
578  	 */
579  	barrier();
580  	WRITE_ONCE(current->curr_ret_stack, offset + 1);
581  	/*
582  	 * This next barrier is to ensure that an interrupt coming in
583  	 * will not corrupt what we are about to write.
584  	 */
585  	barrier();
586  
587  	/* Still keep it reserved even if an interrupt came in */
588  	current->ret_stack[offset] = val;
589  
590  	ret_stack->ret = ret;
591  	ret_stack->func = func;
592  	ret_stack->calltime = calltime;
593  #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
594  	ret_stack->fp = frame_pointer;
595  #endif
596  	ret_stack->retp = retp;
597  	return offset;
598  }
599  
600  /*
601   * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
602   * functions. But those archs currently don't support direct functions
603   * anyway, and ftrace_find_rec_direct() is just a stub for them.
604   * Define MCOUNT_INSN_SIZE to keep those archs compiling.
605   */
606  #ifndef MCOUNT_INSN_SIZE
607  /* Make sure this only works without direct calls */
608  # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
609  #  error MCOUNT_INSN_SIZE not defined with direct calls enabled
610  # endif
611  # define MCOUNT_INSN_SIZE 0
612  #endif
613  
614  /* If the caller does not use ftrace, call this function. */
615  int function_graph_enter(unsigned long ret, unsigned long func,
616  			 unsigned long frame_pointer, unsigned long *retp)
617  {
618  	struct ftrace_graph_ent trace;
619  	unsigned long bitmap = 0;
620  	int offset;
621  	int i;
622  
623  	trace.func = func;
624  	trace.depth = ++current->curr_ret_depth;
625  
626  	offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
627  	if (offset < 0)
628  		goto out;
629  
630  #ifdef CONFIG_HAVE_STATIC_CALL
631  	if (static_branch_likely(&fgraph_do_direct)) {
632  		int save_curr_ret_stack = current->curr_ret_stack;
633  
634  		if (static_call(fgraph_func)(&trace, fgraph_direct_gops))
635  			bitmap |= BIT(fgraph_direct_gops->idx);
636  		else
637  			/* Clear out any saved storage */
638  			current->curr_ret_stack = save_curr_ret_stack;
639  	} else
640  #endif
641  	{
642  		for_each_set_bit(i, &fgraph_array_bitmask,
643  					 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
644  			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
645  			int save_curr_ret_stack;
646  
647  			if (gops == &fgraph_stub)
648  				continue;
649  
650  			save_curr_ret_stack = current->curr_ret_stack;
651  			if (ftrace_ops_test(&gops->ops, func, NULL) &&
652  			    gops->entryfunc(&trace, gops))
653  				bitmap |= BIT(i);
654  			else
655  				/* Clear out any saved storage */
656  				current->curr_ret_stack = save_curr_ret_stack;
657  		}
658  	}
659  
660  	if (!bitmap)
661  		goto out_ret;
662  
663  	/*
664  	 * Since this function uses fgraph_idx = 0 as a tail-call checking
665  	 * flag, set that bit always.
666  	 */
667  	set_bitmap(current, offset, bitmap | BIT(0));
668  
669  	return 0;
670   out_ret:
671  	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
672   out:
673  	current->curr_ret_depth--;
674  	return -EBUSY;
675  }
676  
677  /* Retrieve a function return address from the trace stack on thread info. */
678  static struct ftrace_ret_stack *
679  ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
680  			unsigned long frame_pointer, int *offset)
681  {
682  	struct ftrace_ret_stack *ret_stack;
683  
684  	ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);
685  
686  	if (unlikely(!ret_stack)) {
687  		ftrace_graph_stop();
688  		WARN(1, "Bad function graph ret_stack pointer: %d",
689  		     current->curr_ret_stack);
690  		/* Might as well panic, otherwise we have nowhere to go */
691  		*ret = (unsigned long)panic;
692  		return NULL;
693  	}
694  
695  #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
696  	/*
697  	 * The arch may choose to record the frame pointer used
698  	 * and check it here to make sure that it is what we expect it
699  	 * to be. If gcc does not set the place holder of the return
700  	 * address in the frame pointer, and does a copy instead, then
701  	 * the function graph trace will fail. This test detects this
702  	 * case.
703  	 *
704  	 * Currently, x86_32 with optimize for size (-Os) makes the latest
705  	 * gcc do the above.
706  	 *
707  	 * Note, -mfentry does not use frame pointers, and this test
708  	 *  is not needed if CC_USING_FENTRY is set.
709  	 */
710  	if (unlikely(ret_stack->fp != frame_pointer)) {
711  		ftrace_graph_stop();
712  		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
713  		     "  from func %ps return to %lx\n",
714  		     ret_stack->fp,
715  		     frame_pointer,
716  		     (void *)ret_stack->func,
717  		     ret_stack->ret);
718  		*ret = (unsigned long)panic;
719  		return NULL;
720  	}
721  #endif
722  
723  	*offset += FGRAPH_FRAME_OFFSET;
724  	*ret = ret_stack->ret;
725  	trace->func = ret_stack->func;
726  	trace->calltime = ret_stack->calltime;
727  	trace->overrun = atomic_read(&current->trace_overrun);
728  	trace->depth = current->curr_ret_depth;
729  	/*
730  	 * We still want to trace interrupts coming in if
731  	 * max_depth is set to 1. Make sure the decrement is
732  	 * seen before ftrace_graph_return.
733  	 */
734  	barrier();
735  
736  	return ret_stack;
737  }
738  
739  /*
740   * Hibernation protection.
741   * The state of the current task is too unstable during
742   * suspend/restore to disk. We want to protect against that.
743   */
744  static int
745  ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
746  							void *unused)
747  {
748  	switch (state) {
749  	case PM_HIBERNATION_PREPARE:
750  		pause_graph_tracing();
751  		break;
752  
753  	case PM_POST_HIBERNATION:
754  		unpause_graph_tracing();
755  		break;
756  	}
757  	return NOTIFY_DONE;
758  }
759  
760  static struct notifier_block ftrace_suspend_notifier = {
761  	.notifier_call = ftrace_suspend_notifier_call,
762  };
763  
764  /* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
765  struct fgraph_ret_regs;
766  
767  /*
768   * Send the trace to the ring-buffer.
769   * @return the original return address.
770   */
771  static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
772  						unsigned long frame_pointer)
773  {
774  	struct ftrace_ret_stack *ret_stack;
775  	struct ftrace_graph_ret trace;
776  	unsigned long bitmap;
777  	unsigned long ret;
778  	int offset;
779  	int i;
780  
781  	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);
782  
783  	if (unlikely(!ret_stack)) {
784  		ftrace_graph_stop();
785  		WARN_ON(1);
786  		/* Might as well panic. What else to do? */
787  		return (unsigned long)panic;
788  	}
789  
790  	trace.rettime = trace_clock_local();
791  #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
792  	trace.retval = fgraph_ret_regs_return_value(ret_regs);
793  #endif
794  
795  	bitmap = get_bitmap_bits(current, offset);
796  
797  #ifdef CONFIG_HAVE_STATIC_CALL
798  	if (static_branch_likely(&fgraph_do_direct)) {
799  		if (test_bit(fgraph_direct_gops->idx, &bitmap))
800  			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops);
801  	} else
802  #endif
803  	{
804  		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
805  			struct fgraph_ops *gops = fgraph_array[i];
806  
807  			if (gops == &fgraph_stub)
808  				continue;
809  
810  			gops->retfunc(&trace, gops);
811  		}
812  	}
813  
814  	/*
815  	 * The ftrace_graph_return() may still access the current
816  	 * ret_stack structure, we need to make sure the update of
817  	 * curr_ret_stack is after that.
818  	 */
819  	barrier();
820  	current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;
821  
822  	current->curr_ret_depth--;
823  	return ret;
824  }
825  
826  /*
827   * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
828   * leave only ftrace_return_to_handler(ret_regs).
829   */
830  #ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
831  unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
832  {
833  	return __ftrace_return_to_handler(ret_regs,
834  				fgraph_ret_regs_frame_pointer(ret_regs));
835  }
836  #else
837  unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
838  {
839  	return __ftrace_return_to_handler(NULL, frame_pointer);
840  }
841  #endif
842  
843  /**
844   * ftrace_graph_get_ret_stack - return the entry of the shadow stack
845   * @task: The task to read the shadow stack from.
846   * @idx: Index down the shadow stack
847   *
848   * Return the ftrace_ret_stack entry on the shadow stack of the @task at the
849   * call graph at @idx starting with zero. If @idx is zero, it
850   * will return the last saved ret_stack entry. If it is greater than
851   * zero, it will return the corresponding ret_stack for the depth
852   * of saved return addresses.
853   */
854  struct ftrace_ret_stack *
855  ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
856  {
857  	struct ftrace_ret_stack *ret_stack = NULL;
858  	int offset = task->curr_ret_stack;
859  
860  	if (offset < 0)
861  		return NULL;
862  
863  	do {
864  		ret_stack = get_ret_stack(task, offset, &offset);
865  	} while (ret_stack && --idx >= 0);
866  
867  	return ret_stack;
868  }
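
/*
 * Example (an illustrative sketch): walking the saved call chain of @task,
 * newest frame first.
 *
 *	struct ftrace_ret_stack *frame;
 *	int depth = 0;
 *
 *	while ((frame = ftrace_graph_get_ret_stack(task, depth++)) != NULL)
 *		pr_info("%d: %ps\n", depth, (void *)frame->func);
 */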
869  
870  /**
871   * ftrace_graph_ret_addr - return the original value of the return address
872   * @task: The task the unwinder is being executed on
873   * @idx: An initialized pointer to the next stack index to use
874   * @ret: The current return address (likely pointing to return_handler)
875   * @retp: The address on the stack of the current return location
876   *
877   * This function can be called by stack unwinding code to convert a found stack
878   * return address (@ret) to its original value, in case the function graph
879   * tracer has modified it to be 'return_to_handler'.  If the address hasn't
880   * been modified, the unchanged value of @ret is returned.
881   *
882   * @idx holds the last index used to know where to start from. It should be
883   * initialized to zero for the first iteration as that will mean to start
884   * at the top of the shadow stack. If the location is found, this pointer
885   * will be assigned that location so that if called again, it will continue
886   * where it left off.
887   *
888   * @retp is a pointer to the return address on the stack.
889   */
890  unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
891  				    unsigned long ret, unsigned long *retp)
892  {
893  	struct ftrace_ret_stack *ret_stack;
894  	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
895  	int i = task->curr_ret_stack;
896  
897  	if (ret != return_handler)
898  		return ret;
899  
900  	if (!idx)
901  		return ret;
902  
903  	i = *idx ? : task->curr_ret_stack;
904  	while (i > 0) {
905  		ret_stack = get_ret_stack(task, i, &i);
906  		if (!ret_stack)
907  			break;
908  		/*
909  		 * For a tail-call, there can be 2 or more ftrace_ret_stacks on
910  		 * the ret_stack, all of which record "return_to_handler" as the return
911  		 * address except for the last one.
912  		 * But on the real stack, there is only 1 entry because the tail-call
913  		 * reuses the return address on the stack and jumps to the next function.
914  		 * Thus we keep searching for the real return address.
915  		 */
916  		if (ret_stack->retp == retp &&
917  		    ret_stack->ret != return_handler) {
918  			*idx = i;
919  			return ret_stack->ret;
920  		}
921  	}
922  
923  	return ret;
924  }
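
/*
 * Example (an illustrative sketch of how an arch unwinder typically calls
 * this; the local names and the per-frame loop are hypothetical):
 *
 *	int graph_idx = 0;
 *	unsigned long addr;
 *
 *	// for every return address found while walking the real stack:
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr,
 *				     (unsigned long *)retp);
 *	// addr now holds the original return address even if the stack
 *	// slot had been rewritten to return_to_handler.
 */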
925  
926  static struct ftrace_ops graph_ops = {
927  	.func			= ftrace_graph_func,
928  	.flags			= FTRACE_OPS_GRAPH_STUB,
929  #ifdef FTRACE_GRAPH_TRAMP_ADDR
930  	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
931  	/* trampoline_size is only needed for dynamically allocated tramps */
932  #endif
933  };
934  
935  void fgraph_init_ops(struct ftrace_ops *dst_ops,
936  		     struct ftrace_ops *src_ops)
937  {
938  	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;
939  
940  #ifdef CONFIG_DYNAMIC_FTRACE
941  	if (src_ops) {
942  		dst_ops->func_hash = &src_ops->local_hash;
943  		mutex_init(&dst_ops->local_hash.regex_lock);
944  		INIT_LIST_HEAD(&dst_ops->subop_list);
945  		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
946  	}
947  #endif
948  }
949  
950  void ftrace_graph_sleep_time_control(bool enable)
951  {
952  	fgraph_sleep_time = enable;
953  }
954  
955  /*
956   * Simply points to ftrace_stub, but with the proper protocol.
957   * Defined by the linker script in linux/vmlinux.lds.h
958   */
959  void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
960  
961  /* The callbacks that hook a function */
962  trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
963  trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
964  
965  /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
966  static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
967  {
968  	int i;
969  	int ret = 0;
970  	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
971  	struct task_struct *g, *t;
972  
973  	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
974  		ret_stack_list[i] = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
975  		if (!ret_stack_list[i]) {
976  			start = 0;
977  			end = i;
978  			ret = -ENOMEM;
979  			goto free;
980  		}
981  	}
982  
983  	rcu_read_lock();
984  	for_each_process_thread(g, t) {
985  		if (start == end) {
986  			ret = -EAGAIN;
987  			goto unlock;
988  		}
989  
990  		if (t->ret_stack == NULL) {
991  			atomic_set(&t->trace_overrun, 0);
992  			ret_stack_init_task_vars(ret_stack_list[start]);
993  			t->curr_ret_stack = 0;
994  			t->curr_ret_depth = -1;
995  			/* Make sure the tasks see the 0 first: */
996  			smp_wmb();
997  			t->ret_stack = ret_stack_list[start++];
998  		}
999  	}
1000  
1001  unlock:
1002  	rcu_read_unlock();
1003  free:
1004  	for (i = start; i < end; i++)
1005  		kfree(ret_stack_list[i]);
1006  	return ret;
1007  }
1008  
1009  static void
1010  ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
1011  				struct task_struct *prev,
1012  				struct task_struct *next,
1013  				unsigned int prev_state)
1014  {
1015  	struct ftrace_ret_stack *ret_stack;
1016  	unsigned long long timestamp;
1017  	int offset;
1018  
1019  	/*
1020  	 * Does the user want to count the time a function was asleep?
1021  	 * If so, do not update the time stamps.
1022  	 */
1023  	if (fgraph_sleep_time)
1024  		return;
1025  
1026  	timestamp = trace_clock_local();
1027  
1028  	prev->ftrace_timestamp = timestamp;
1029  
1030  	/* only process tasks that we timestamped */
1031  	if (!next->ftrace_timestamp)
1032  		return;
1033  
1034  	/*
1035  	 * Update all the counters in next to make up for the
1036  	 * time next was sleeping.
1037  	 */
1038  	timestamp -= next->ftrace_timestamp;
1039  
1040  	for (offset = next->curr_ret_stack; offset > 0; ) {
1041  		ret_stack = get_ret_stack(next, offset, &offset);
1042  		if (ret_stack)
1043  			ret_stack->calltime += timestamp;
1044  	}
1045  }
1046  
1047  static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);
1048  
1049  static void
1050  graph_init_task(struct task_struct *t, unsigned long *ret_stack)
1051  {
1052  	atomic_set(&t->trace_overrun, 0);
1053  	ret_stack_init_task_vars(ret_stack);
1054  	t->ftrace_timestamp = 0;
1055  	t->curr_ret_stack = 0;
1056  	t->curr_ret_depth = -1;
1057  	/* make curr_ret_stack visible before we add the ret_stack */
1058  	smp_wmb();
1059  	t->ret_stack = ret_stack;
1060  }
1061  
1062  /*
1063   * Allocate a return stack for the idle task. May be the first
1064   * time through, or it may be done by CPU hotplug online.
1065   */
1066  void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
1067  {
1068  	t->curr_ret_stack = 0;
1069  	t->curr_ret_depth = -1;
1070  	/*
1071  	 * The idle task has no parent, it either has its own
1072  	 * stack or no stack at all.
1073  	 */
1074  	if (t->ret_stack)
1075  		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
1076  
1077  	if (ftrace_graph_active) {
1078  		unsigned long *ret_stack;
1079  
1080  		ret_stack = per_cpu(idle_ret_stack, cpu);
1081  		if (!ret_stack) {
1082  			ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
1083  			if (!ret_stack)
1084  				return;
1085  			per_cpu(idle_ret_stack, cpu) = ret_stack;
1086  		}
1087  		graph_init_task(t, ret_stack);
1088  	}
1089  }
1090  
1091  /* Allocate a return stack for newly created task */
1092  void ftrace_graph_init_task(struct task_struct *t)
1093  {
1094  	/* Make sure we do not use the parent ret_stack */
1095  	t->ret_stack = NULL;
1096  	t->curr_ret_stack = 0;
1097  	t->curr_ret_depth = -1;
1098  
1099  	if (ftrace_graph_active) {
1100  		unsigned long *ret_stack;
1101  
1102  		ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
1103  		if (!ret_stack)
1104  			return;
1105  		graph_init_task(t, ret_stack);
1106  	}
1107  }
1108  
1109  void ftrace_graph_exit_task(struct task_struct *t)
1110  {
1111  	unsigned long *ret_stack = t->ret_stack;
1112  
1113  	t->ret_stack = NULL;
1114  	/* NULL must become visible to IRQs before we free it: */
1115  	barrier();
1116  
1117  	kfree(ret_stack);
1118  }
1119  
1120  #ifdef CONFIG_DYNAMIC_FTRACE
1121  static int fgraph_pid_func(struct ftrace_graph_ent *trace,
1122  			   struct fgraph_ops *gops)
1123  {
1124  	struct trace_array *tr = gops->ops.private;
1125  	int pid;
1126  
1127  	if (tr) {
1128  		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
1129  		if (pid == FTRACE_PID_IGNORE)
1130  			return 0;
1131  		if (pid != FTRACE_PID_TRACE &&
1132  		    pid != current->pid)
1133  			return 0;
1134  	}
1135  
1136  	return gops->saved_func(trace, gops);
1137  }
1138  
1139  void fgraph_update_pid_func(void)
1140  {
1141  	struct fgraph_ops *gops;
1142  	struct ftrace_ops *op;
1143  
1144  	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
1145  		return;
1146  
1147  	list_for_each_entry(op, &graph_ops.subop_list, list) {
1148  		if (op->flags & FTRACE_OPS_FL_PID) {
1149  			gops = container_of(op, struct fgraph_ops, ops);
1150  			gops->entryfunc = ftrace_pids_enabled(op) ?
1151  				fgraph_pid_func : gops->saved_func;
1152  			if (ftrace_graph_active == 1)
1153  				static_call_update(fgraph_func, gops->entryfunc);
1154  		}
1155  	}
1156  }
1157  #endif
1158  
1159  /* Allocate a return stack for each task */
1160  static int start_graph_tracing(void)
1161  {
1162  	unsigned long **ret_stack_list;
1163  	int ret;
1164  
1165  	ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
1166  				 sizeof(*ret_stack_list), GFP_KERNEL);
1167  
1168  	if (!ret_stack_list)
1169  		return -ENOMEM;
1170  
1171  	do {
1172  		ret = alloc_retstack_tasklist(ret_stack_list);
1173  	} while (ret == -EAGAIN);
1174  
1175  	if (!ret) {
1176  		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1177  		if (ret)
1178  			pr_info("ftrace_graph: Couldn't activate tracepoint"
1179  				" probe to kernel_sched_switch\n");
1180  	}
1181  
1182  	kfree(ret_stack_list);
1183  	return ret;
1184  }
1185  
1186  static void init_task_vars(int idx)
1187  {
1188  	struct task_struct *g, *t;
1189  	int cpu;
1190  
1191  	for_each_online_cpu(cpu) {
1192  		if (idle_task(cpu)->ret_stack)
1193  			ret_stack_set_task_var(idle_task(cpu), idx, 0);
1194  	}
1195  
1196  	read_lock(&tasklist_lock);
1197  	for_each_process_thread(g, t) {
1198  		if (t->ret_stack)
1199  			ret_stack_set_task_var(t, idx, 0);
1200  	}
1201  	read_unlock(&tasklist_lock);
1202  }
1203  
1204  static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
1205  {
1206  	trace_func_graph_ent_t func = NULL;
1207  	trace_func_graph_ret_t retfunc = NULL;
1208  	int i;
1209  
1210  	if (gops) {
1211  		func = gops->entryfunc;
1212  		retfunc = gops->retfunc;
1213  		fgraph_direct_gops = gops;
1214  	} else {
1215  		for_each_set_bit(i, &fgraph_array_bitmask,
1216  				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
1217  			func = fgraph_array[i]->entryfunc;
1218  			retfunc = fgraph_array[i]->retfunc;
1219  			fgraph_direct_gops = fgraph_array[i];
1220  		}
1221  	}
1222  	if (WARN_ON_ONCE(!func))
1223  		return;
1224  
1225  	static_call_update(fgraph_func, func);
1226  	static_call_update(fgraph_retfunc, retfunc);
1227  	if (enable_branch)
1228  		static_branch_disable(&fgraph_do_direct);
1229  }
1230  
1231  static void ftrace_graph_disable_direct(bool disable_branch)
1232  {
1233  	if (disable_branch)
1234  		static_branch_disable(&fgraph_do_direct);
1235  	static_call_update(fgraph_func, ftrace_graph_entry_stub);
1236  	static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
1237  	fgraph_direct_gops = &fgraph_stub;
1238  }
1239  
1240  /* The cpu_boot init_task->ret_stack will never be freed */
1241  static int fgraph_cpu_init(unsigned int cpu)
1242  {
1243  	if (!idle_task(cpu)->ret_stack)
1244  		ftrace_graph_init_idle_task(idle_task(cpu), cpu);
1245  	return 0;
1246  }
1247  
1248  int register_ftrace_graph(struct fgraph_ops *gops)
1249  {
1250  	static bool fgraph_initialized;
1251  	int command = 0;
1252  	int ret = 0;
1253  	int i = -1;
1254  
1255  	guard(mutex)(&ftrace_lock);
1256  
1257  	if (!fgraph_initialized) {
1258  		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
1259  					fgraph_cpu_init, NULL);
1260  		if (ret < 0) {
1261  			pr_warn("fgraph: Error to init cpu hotplug support\n");
1262  			return ret;
1263  		}
1264  		fgraph_initialized = true;
1265  		ret = 0;
1266  	}
1267  
1268  	if (!fgraph_array[0]) {
1269  		/* The array must always have real data on it */
1270  		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
1271  			fgraph_array[i] = &fgraph_stub;
1272  		fgraph_lru_init();
1273  	}
1274  
1275  	i = fgraph_lru_alloc_index();
1276  	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub))
1277  		return -ENOSPC;
1278  	gops->idx = i;
1279  
1280  	ftrace_graph_active++;
1281  
1282  	if (ftrace_graph_active == 2)
1283  		ftrace_graph_disable_direct(true);
1284  
1285  	if (ftrace_graph_active == 1) {
1286  		ftrace_graph_enable_direct(false, gops);
1287  		register_pm_notifier(&ftrace_suspend_notifier);
1288  		ret = start_graph_tracing();
1289  		if (ret)
1290  			goto error;
1291  		/*
1292  		 * Some archs just test to see if these are not
1293  		 * the default function
1294  		 */
1295  		ftrace_graph_return = return_run;
1296  		ftrace_graph_entry = entry_run;
1297  		command = FTRACE_START_FUNC_RET;
1298  	} else {
1299  		init_task_vars(gops->idx);
1300  	}
1301  	/* Always save the function, and reset at unregistering */
1302  	gops->saved_func = gops->entryfunc;
1303  
1304  	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
1305  	if (!ret)
1306  		fgraph_array[i] = gops;
1307  
1308  error:
1309  	if (ret) {
1310  		ftrace_graph_active--;
1311  		gops->saved_func = NULL;
1312  		fgraph_lru_release_index(i);
1313  	}
1314  	return ret;
1315  }
1316  
1317  void unregister_ftrace_graph(struct fgraph_ops *gops)
1318  {
1319  	int command = 0;
1320  
1321  	mutex_lock(&ftrace_lock);
1322  
1323  	if (unlikely(!ftrace_graph_active))
1324  		goto out;
1325  
1326  	if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
1327  		     fgraph_array[gops->idx] != gops))
1328  		goto out;
1329  
1330  	if (fgraph_lru_release_index(gops->idx) < 0)
1331  		goto out;
1332  
1333  	fgraph_array[gops->idx] = &fgraph_stub;
1334  
1335  	ftrace_graph_active--;
1336  
1337  	if (!ftrace_graph_active)
1338  		command = FTRACE_STOP_FUNC_RET;
1339  
1340  	ftrace_shutdown_subops(&graph_ops, &gops->ops, command);
1341  
1342  	if (ftrace_graph_active == 1)
1343  		ftrace_graph_enable_direct(true, NULL);
1344  	else if (!ftrace_graph_active)
1345  		ftrace_graph_disable_direct(false);
1346  
1347  	if (!ftrace_graph_active) {
1348  		ftrace_graph_return = ftrace_stub_graph;
1349  		ftrace_graph_entry = ftrace_graph_entry_stub;
1350  		unregister_pm_notifier(&ftrace_suspend_notifier);
1351  		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1352  	}
1353   out:
1354  	gops->saved_func = NULL;
1355  	mutex_unlock(&ftrace_lock);
1356  }
1357
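
/*
 * Example (an illustrative sketch, not part of this file): registering a
 * minimal fgraph_ops using the hypothetical my_entry()/my_return() callbacks
 * sketched earlier. Filtering may be set up on my_gops.ops beforehand, just
 * like for any other ftrace_ops.
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	ret = register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */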