/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
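
/* Worked bound for the limits above (assuming |off| and |size| each fit in
 * an int, i.e. are below 2^31): umax_value is capped at 2^29, so
 * umax_value + (int)off + (int)size stays below 2^29 + 2^32 < 2^33,
 * comfortably inside both u64 and s64 range.
 */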
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320
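/* Worked count for the 306-byte figure above: offsets -8..-512 in steps of 8
 * give 64 entries: "-8" is 2 chars, "-16".."-96" are 11 entries of 3 chars,
 * "-104".."-512" are 52 entries of 4 chars, plus 63 commas:
 * 2 + 11 * 3 + 52 * 4 + 63 = 306.
 */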
/* Patch buffer size */
#define INSN_BUF_SIZE 32

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
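
/* Illustrative sketch of the propagation rules above (hypothetical BPF
 * pseudo-asm, not tied to any particular program):
 *
 *   r1 = 0           // REG_LIVE_WRITTEN on r1 in this state
 *   if r2 > 5 goto +4
 *                    // read of r2: a READ mark propagates up the parentage
 *                    // chain until it reaches a state that wrote r2
 *   r3 = r1          // read of r1 stops here: the write mark screens it
 *                    // off, so r1's value on entry is irrelevant
 */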

/* For every reg representing a map value or allocated object pointer,
 * we consider the tuple (ptr, id) to be unique in the verifier
 * context and consider such registers not to alias each other for the
 * purposes of tracking lock state.
 */
struct bpf_active_lock {
	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
	 * there's no active lock held, and other fields have no
	 * meaning. If non-NULL, it indicates that a lock is held and
	 * the id member holds the reg->id of that register, which can
	 * be >= 0.
	 */
	void *ptr;
	/* This will be reg->id */
	u32 id;
};
66  
67  #define ITER_PREFIX "bpf_iter_"
68  
69  enum bpf_iter_state {
70  	BPF_ITER_STATE_INVALID, /* for non-first slot */
71  	BPF_ITER_STATE_ACTIVE,
72  	BPF_ITER_STATE_DRAINED,
73  };
74  
75  struct bpf_reg_state {
76  	/* Ordering of fields matters.  See states_equal() */
77  	enum bpf_reg_type type;
78  	/*
79  	 * Fixed part of pointer offset, pointer types only.
80  	 * Or constant delta between "linked" scalars with the same ID.
81  	 */
82  	s32 off;
83  	union {
84  		/* valid when type == PTR_TO_PACKET */
85  		int range;
86  
87  		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
88  		 *   PTR_TO_MAP_VALUE_OR_NULL
89  		 */
90  		struct {
91  			struct bpf_map *map_ptr;
			/* To distinguish different inner maps returned
			 * from a lookup in an outer map, map_uid is
			 * non-zero for registers pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
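	/* Example of how var_off and the ranges interact (illustrative):
	 * after "r1 &= 0xff" on an unknown scalar, the verifier tracks
	 * var_off = (value 0x0, mask 0xff), umin_value = 0, umax_value = 255,
	 * smin_value = 0, smax_value = 255, with matching 32-bit bounds.
	 */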
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	/*
	 * Upper bit of ID is used to remember relationship between "linked"
	 * registers. Example:
	 * r1 = r2;    both will have r1->id == r2->id == N
	 * r1 += 10;   r1->id == N | BPF_ADD_CONST and r1->off == 10
	 */
#define BPF_ADD_CONST (1U << 31)
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated also.  In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's
	 * stack while the other points to the caller's stack. To
	 * differentiate them 'frameno' is used, which is an index in the
	 * bpf_verifier_state->frame[] array pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS		(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
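
/* Illustrative example: after "*(u64 *)(r10 - 8) = r1" spills a register,
 * all eight slot_type[] bytes of the corresponding bpf_stack_state are
 * STACK_SPILL and spilled_ptr holds a copy of r1's state; a store of
 * unknown scalar bytes instead marks the touched bytes STACK_MISC.
 */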

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* There can be a case like:
	 * main (frame 0)
	 *  cb (frame 1)
	 *   func (frame 2)
	 *    cb (frame 3)
	 * Hence for frame 3, if callback_ref just stored a boolean, it would be
	 * impossible to distinguish nested callback refs. Hence store the
	 * frameno and compare that to callback_ref in check_reference_leak when
	 * exiting a callback function.
	 */
	int callback_ref;
};

struct bpf_retval_range {
	s32 minval;
	s32 maxval;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct bpf_retval_range callback_ret_range;
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For callback calling functions that limit number of possible
	 * callback executions (e.g. bpf_loop), this keeps track of the
	 * current simulated iteration number.
	 * Value in frame N refers to number of times callback with frame
	 * N+1 was simulated, e.g. for the following call:
	 *
	 *   bpf_loop(..., fn, ...); | suppose current frame is N
	 *                           | fn would be simulated in frame N+1
	 *                           | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
	 * (i.e. 8) bytes worth of stack memory.
	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
	 * stack[1] represents bytes [*(r10-16)..*(r10-9)]
	 * ...
	 * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
	 */
	struct bpf_stack_state *stack;
	/* Size of the current stack, in bytes. The stack state is tracked above,
	 * in `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
	 */
	int allocated_stack;
};
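
/* A fixed stack offset "off" (negative, relative to r10) maps to slot index
 * spi = (-off - 1) / BPF_REG_SIZE, so fp-8 is stack[0] and fp-16 is stack[1],
 * matching the layout described above.
 */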

#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_jmp_history_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
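
/* Packing sketch for the layout above (hypothetical values: a stack access
 * in frame 2 touching spi 5):
 *
 *   u32 flags = 2 | (5 << INSN_F_SPI_SHIFT) | INSN_F_STACK_ACCESS;
 *   u32 frameno = flags & INSN_F_FRAMENO_MASK;                // 2
 *   u32 spi = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;  // 5
 */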

struct bpf_jmp_history_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 22;
	/* special flags, e.g., whether insn is doing register stack spill/load */
	u32 flags : 10;
	/* additional registers that need precision tracking when this
	 * jump is backtracked, vector of six 10-bit records
	 */
	u64 linked_regs;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 * were safely pruned
	 * 1 - at least one path is being explored.
	 * This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 * This state is an immediate parent of two children.
	 * One is fallthrough branch with branches==1 and another
	 * state is pushed into stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for state
	 * equivalence, so two states being 'states_equal' does not mean an
	 * infinite loop. The exact comparison is provided by
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	struct bpf_active_lock active_lock;
	bool speculative;
	bool active_rcu_lock;
	u32 active_preempt_lock;
	/* If this state was ever pointed-to by other state's loop_entry field
	 * this flag would be set to true. Used to avoid freeing such states
	 * while they are still in use.
	 */
	bool used_as_loop_entry;
	bool in_sleepable;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is a part of states loop this field points to some
	 * parent of this state such that:
	 * - it is also a member of the same states loop;
	 * - DFS states traversal starting from initial state visits loop_entry
	 *   state before this state.
	 * Used to compute topmost loop entry for state loops.
	 * State loops might appear because of open coded iterators logic.
	 * See get_loop_entry() for more information.
	 */
	struct bpf_verifier_state *loop_entry;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops can go up to ~40.
	 */
	struct bpf_jmp_history_entry *jmp_history;
	u32 jmp_history_cnt;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
};

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr)   \
	({                                                               \
		struct bpf_verifier_state *___vstate = __vst;            \
		int ___i, ___j;                                          \
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {    \
			struct bpf_reg_state *___regs;                   \
			__state = ___vstate->frame[___i];                \
			___regs = __state->regs;                         \
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {     \
				__reg = &___regs[___j];                  \
				(void)(__expr);                          \
			}                                                \
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)                              \
					continue;                        \
				(void)(__expr);                          \
			}                                                \
		}                                                        \
	})
/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
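
/* Usage sketch (hypothetical caller and helper): walk every register and
 * spilled register in the current state, e.g. to invalidate those that
 * reference a released object:
 *
 *	struct bpf_func_state *st;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, st, reg, ({
 *		if (reg->ref_obj_id == released_id)
 *			mark_reg_invalid(env, reg);
 *	}));
 */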

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* pointer and state for maps */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;
	bool unpriv;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop this field tracks
		 * the state of the relevant registers to make decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitization */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear upper bits */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	/* true if STX or LDX instruction is a part of a spill/fill
	 * pattern for a bpf_fastcall call.
	 */
	u8 fastcall_pattern:1;
	/* for CALL instructions, a number of spill/fill pairs in the
	 * bpf_fastcall pattern.
	 */
	u8 fastcall_spills_num:3;

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
	/* true if instruction is a call to a helper function that
	 * accepts callback function as a parameter.
	 */
	bool calls_callback;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the number of useful bytes
	 * in the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_arg_info {
	enum bpf_arg_type arg_type;
	union {
		u32 mem_size;
		u32 btf_id;
	};
};

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills.
	 */
	s16 fastcall_stack_off;
	bool has_tail_call: 1;
	bool tail_call_reachable: 1;
	bool has_ld_abs: 1;
	bool is_cb: 1;
	bool is_async_cb: 1;
	bool is_exception_cb: 1;
	bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;

	u8 arg_cnt;
	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod;	/* The owner module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	bool test_reg_invariants;	/* fail verification on register invariants violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset are
	 * always allowed, so this refers to reads (with fixed or variable offset),
	 * to writes with variable offset and to indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	struct backtrack_state bt;
	struct bpf_jmp_history_entry *cur_hist_ent;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to temporarily hold constants as scalar registers */
	struct bpf_reg_state fake_reg[2];
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE];
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
	return &env->prog->aux->func_info_aux[subprog];
}

static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
	return &env->subprog_info[subprog];
}

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
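
/* Round-trip sketch for the two helpers above: with no target prog the key is
 * ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id, so unpacking recovers
 * the object id from the high 32 bits and btf_id from the low 31 bits; bit 31
 * merely flags the "raw BTF target" case and is masked off by 0x7FFFFFFF.
 */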

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
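
/* Example: a maybe-NULL map value pointer is encoded as
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL; base_type() strips the flag back to
 * PTR_TO_MAP_VALUE and type_flag() yields PTR_MAYBE_NULL.
 */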

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
		prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

static inline bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static inline bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static inline bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
	off -= spill_size - fill_size;
#endif

	return !(off % BPF_REG_SIZE);
}
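
/* Worked example (assuming an 8-byte spill at fp-8): a 4-byte fill of the low
 * word reads at off = -8 on little-endian (LSB at the lowest address), which
 * is already BPF_REG_SIZE aligned. On big-endian the low 4 bytes sit at fp-4,
 * so off = -4 is first shifted by -(spill_size - fill_size) = -4 back to -8
 * before the alignment check.
 */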

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);

void print_verifier_state(struct bpf_verifier_env *env,
			  const struct bpf_func_state *state, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);

#endif /* _LINUX_BPF_VERIFIER_H */