// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

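/* Size of the value blob exchanged with user space: per-CPU maps exchange one
 * value per possible CPU (each rounded up to 8 bytes), fd-based maps exchange
 * a 32-bit fd/id, everything else uses the map's declared value_size.
 */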
static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return  map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. For sleepable
	 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
	 * for the completions of these programs, but considering the waiting
	 * time can be very long and userspace may think it will hang forever,
	 * so don't handle sleepable BPF programs now.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_ARENA ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}

/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

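/* Assign a user-visible ID to the map; IDs are handed out cyclically from the
 * global map IDR so that freed IDs are not immediately reused.
 */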
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
			unsigned long nr_pages, struct page **pages)
{
	unsigned long i, j;
	struct page *pg;
	int ret = 0;
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg, *old_memcg;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
#endif
	for (i = 0; i < nr_pages; i++) {
		pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);

		if (pg) {
			pages[i] = pg;
			continue;
		}
		for (j = 0; j < i; j++)
			__free_page(pages[j]);
		ret = -ENOMEM;
		break;
	}

#ifdef CONFIG_MEMCG
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
#endif
	return ret;
}

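/* bsearch()/sort() comparator: order btf_record fields by their byte offset
 * within the map value.
 */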
static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			if (btf_is_kernel(rec->fields[i].kptr.btf))
				btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			if (btf_is_kernel(fields[i].kptr.btf))
				btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
		return;
	bpf_wq_cancel_and_free(obj + rec->wq_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_WORKQUEUE:
			bpf_wq_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record : NULL,
								 fields[i].type == BPF_KPTR_PERCPU);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

static void bpf_map_free(struct bpf_map *map)
{
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback for map in map case which
	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
	 * template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	bpf_map_free(map);
}

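/* Drop a user reference; once the last one is gone, let the map release
 * resources held on behalf of user space (e.g. programs in a prog_array).
 */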
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

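/* Invoked after an RCU Tasks Trace grace period (see bpf_map_put()). If that
 * grace period also implies a regular RCU grace period, free right away;
 * otherwise chain one more call_rcu() before freeing.
 */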
static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = fd_file(f)->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory region (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allows user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

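/* Let the map implementation pick the unmapped area if it provides a hook;
 * otherwise fall back to the generic placement helper.
 */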
static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
					   unsigned long len, unsigned long pgoff,
					   unsigned long flags)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_get_unmapped_area)
		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
#ifdef CONFIG_MMU
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
#else
	return addr;
#endif
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
	.get_unmapped_area = bpf_get_unmapped_area,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_token_capable(token, CAP_BPF)) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
			case BPF_WORKQUEUE:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

static bool bpf_net_capable(void)
{
	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}

#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	struct bpf_token *token = NULL;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	bool token_flag;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
	 * to avoid per-map type checks tripping on unknown flag
	 */
	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
	attr->map_flags &= ~BPF_F_TOKEN_FD;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_type != BPF_MAP_TYPE_ARENA &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	if (token_flag) {
		token = bpf_token_get_from_fd(attr->map_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);

		/* if current token doesn't grant map creation permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
		    !bpf_token_allow_map_type(token, attr->map_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	err = -EPERM;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
		goto put_token;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_ARENA:
		if (!bpf_token_capable(token, CAP_BPF))
			goto put_token;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!bpf_token_capable(token, CAP_NET_ADMIN))
			goto put_token;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		goto put_token;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto put_token;
	}
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart.  Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_create(map, attr, token);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	bpf_token_put(token);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	bpf_map_free(map);
put_token:
	bpf_token_put(token);
	return err;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc(map);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc_with_uref(map);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

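/* Duplicate a map key passed in from user space. Maps with key_size == 0
 * (e.g. queues and stacks) must pass a NULL key pointer.
 */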
__bpf_copy_key(void __user * ukey,u64 key_size)1495  static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1496  {
1497  	if (key_size)
1498  		return vmemdup_user(ukey, key_size);
1499  
1500  	if (ukey)
1501  		return ERR_PTR(-EINVAL);
1502  
1503  	return NULL;
1504  }
1505  
___bpf_copy_key(bpfptr_t ukey,u64 key_size)1506  static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1507  {
1508  	if (key_size)
1509  		return kvmemdup_bpfptr(ukey, key_size);
1510  
1511  	if (!bpfptr_is_null(ukey))
1512  		return ERR_PTR(-EINVAL);
1513  
1514  	return NULL;
1515  }
1516  
1517  /* last field in 'union bpf_attr' used by this command */
1518  #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1519  
map_lookup_elem(union bpf_attr * attr)1520  static int map_lookup_elem(union bpf_attr *attr)
1521  {
1522  	void __user *ukey = u64_to_user_ptr(attr->key);
1523  	void __user *uvalue = u64_to_user_ptr(attr->value);
1524  	struct bpf_map *map;
1525  	void *key, *value;
1526  	u32 value_size;
1527  	int err;
1528  
1529  	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1530  		return -EINVAL;
1531  
1532  	if (attr->flags & ~BPF_F_LOCK)
1533  		return -EINVAL;
1534  
1535  	CLASS(fd, f)(attr->map_fd);
1536  	map = __bpf_map_get(f);
1537  	if (IS_ERR(map))
1538  		return PTR_ERR(map);
1539  	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1540  		return -EPERM;
1541  
1542  	if ((attr->flags & BPF_F_LOCK) &&
1543  	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1544  		return -EINVAL;
1545  
1546  	key = __bpf_copy_key(ukey, map->key_size);
1547  	if (IS_ERR(key))
1548  		return PTR_ERR(key);
1549  
1550  	value_size = bpf_map_value_size(map);
1551  
1552  	err = -ENOMEM;
1553  	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1554  	if (!value)
1555  		goto free_key;
1556  
1557  	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1558  		if (copy_from_user(value, uvalue, value_size))
1559  			err = -EFAULT;
1560  		else
1561  			err = bpf_map_copy_value(map, key, value, attr->flags);
1562  		goto free_value;
1563  	}
1564  
1565  	err = bpf_map_copy_value(map, key, value, attr->flags);
1566  	if (err)
1567  		goto free_value;
1568  
1569  	err = -EFAULT;
1570  	if (copy_to_user(uvalue, value, value_size) != 0)
1571  		goto free_value;
1572  
1573  	err = 0;
1574  
1575  free_value:
1576  	kvfree(value);
1577  free_key:
1578  	kvfree(key);
1579  	return err;
1580  }
1581  
1582  
1583  #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1584  
map_update_elem(union bpf_attr * attr,bpfptr_t uattr)1585  static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1586  {
1587  	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1588  	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1589  	struct bpf_map *map;
1590  	void *key, *value;
1591  	u32 value_size;
1592  	int err;
1593  
1594  	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1595  		return -EINVAL;
1596  
1597  	CLASS(fd, f)(attr->map_fd);
1598  	map = __bpf_map_get(f);
1599  	if (IS_ERR(map))
1600  		return PTR_ERR(map);
1601  	bpf_map_write_active_inc(map);
1602  	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1603  		err = -EPERM;
1604  		goto err_put;
1605  	}
1606  
1607  	if ((attr->flags & BPF_F_LOCK) &&
1608  	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1609  		err = -EINVAL;
1610  		goto err_put;
1611  	}
1612  
1613  	key = ___bpf_copy_key(ukey, map->key_size);
1614  	if (IS_ERR(key)) {
1615  		err = PTR_ERR(key);
1616  		goto err_put;
1617  	}
1618  
1619  	value_size = bpf_map_value_size(map);
1620  	value = kvmemdup_bpfptr(uvalue, value_size);
1621  	if (IS_ERR(value)) {
1622  		err = PTR_ERR(value);
1623  		goto free_key;
1624  	}
1625  
1626  	err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
1627  	if (!err)
1628  		maybe_wait_bpf_programs(map);
1629  
1630  	kvfree(value);
1631  free_key:
1632  	kvfree(key);
1633  err_put:
1634  	bpf_map_write_active_dec(map);
1635  	return err;
1636  }
1637  
1638  #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1639  
map_delete_elem(union bpf_attr * attr,bpfptr_t uattr)1640  static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1641  {
1642  	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1643  	struct bpf_map *map;
1644  	void *key;
1645  	int err;
1646  
1647  	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1648  		return -EINVAL;
1649  
1650  	CLASS(fd, f)(attr->map_fd);
1651  	map = __bpf_map_get(f);
1652  	if (IS_ERR(map))
1653  		return PTR_ERR(map);
1654  	bpf_map_write_active_inc(map);
1655  	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1656  		err = -EPERM;
1657  		goto err_put;
1658  	}
1659  
1660  	key = ___bpf_copy_key(ukey, map->key_size);
1661  	if (IS_ERR(key)) {
1662  		err = PTR_ERR(key);
1663  		goto err_put;
1664  	}
1665  
1666  	if (bpf_map_is_offloaded(map)) {
1667  		err = bpf_map_offload_delete_elem(map, key);
1668  		goto out;
1669  	} else if (IS_FD_PROG_ARRAY(map) ||
1670  		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1671  		/* These maps require sleepable context */
1672  		err = map->ops->map_delete_elem(map, key);
1673  		goto out;
1674  	}
1675  
1676  	bpf_disable_instrumentation();
1677  	rcu_read_lock();
1678  	err = map->ops->map_delete_elem(map, key);
1679  	rcu_read_unlock();
1680  	bpf_enable_instrumentation();
1681  	if (!err)
1682  		maybe_wait_bpf_programs(map);
1683  out:
1684  	kvfree(key);
1685  err_put:
1686  	bpf_map_write_active_dec(map);
1687  	return err;
1688  }
1689  
1690  /* last field in 'union bpf_attr' used by this command */
1691  #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1692  
1693  static int map_get_next_key(union bpf_attr *attr)
1694  {
1695  	void __user *ukey = u64_to_user_ptr(attr->key);
1696  	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1697  	struct bpf_map *map;
1698  	void *key, *next_key;
1699  	int err;
1700  
1701  	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1702  		return -EINVAL;
1703  
1704  	CLASS(fd, f)(attr->map_fd);
1705  	map = __bpf_map_get(f);
1706  	if (IS_ERR(map))
1707  		return PTR_ERR(map);
1708  	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1709  		return -EPERM;
1710  
1711  	if (ukey) {
1712  		key = __bpf_copy_key(ukey, map->key_size);
1713  		if (IS_ERR(key))
1714  			return PTR_ERR(key);
1715  	} else {
1716  		key = NULL;
1717  	}
1718  
1719  	err = -ENOMEM;
1720  	next_key = kvmalloc(map->key_size, GFP_USER);
1721  	if (!next_key)
1722  		goto free_key;
1723  
1724  	if (bpf_map_is_offloaded(map)) {
1725  		err = bpf_map_offload_get_next_key(map, key, next_key);
1726  		goto out;
1727  	}
1728  
1729  	rcu_read_lock();
1730  	err = map->ops->map_get_next_key(map, key, next_key);
1731  	rcu_read_unlock();
1732  out:
1733  	if (err)
1734  		goto free_next_key;
1735  
1736  	err = -EFAULT;
1737  	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1738  		goto free_next_key;
1739  
1740  	err = 0;
1741  
1742  free_next_key:
1743  	kvfree(next_key);
1744  free_key:
1745  	kvfree(key);
1746  	return err;
1747  }
1748  
1749  int generic_map_delete_batch(struct bpf_map *map,
1750  			     const union bpf_attr *attr,
1751  			     union bpf_attr __user *uattr)
1752  {
1753  	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1754  	u32 cp, max_count;
1755  	int err = 0;
1756  	void *key;
1757  
1758  	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1759  		return -EINVAL;
1760  
1761  	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1762  	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1763  		return -EINVAL;
1764  	}
1765  
1766  	max_count = attr->batch.count;
1767  	if (!max_count)
1768  		return 0;
1769  
1770  	if (put_user(0, &uattr->batch.count))
1771  		return -EFAULT;
1772  
1773  	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1774  	if (!key)
1775  		return -ENOMEM;
1776  
1777  	for (cp = 0; cp < max_count; cp++) {
1778  		err = -EFAULT;
1779  		if (copy_from_user(key, keys + cp * map->key_size,
1780  				   map->key_size))
1781  			break;
1782  
1783  		if (bpf_map_is_offloaded(map)) {
1784  			err = bpf_map_offload_delete_elem(map, key);
1785  			break;
1786  		}
1787  
1788  		bpf_disable_instrumentation();
1789  		rcu_read_lock();
1790  		err = map->ops->map_delete_elem(map, key);
1791  		rcu_read_unlock();
1792  		bpf_enable_instrumentation();
1793  		if (err)
1794  			break;
1795  		cond_resched();
1796  	}
1797  	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1798  		err = -EFAULT;
1799  
1800  	kvfree(key);
1801  
1802  	return err;
1803  }
1804  
1805  int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1806  			     const union bpf_attr *attr,
1807  			     union bpf_attr __user *uattr)
1808  {
1809  	void __user *values = u64_to_user_ptr(attr->batch.values);
1810  	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1811  	u32 value_size, cp, max_count;
1812  	void *key, *value;
1813  	int err = 0;
1814  
1815  	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1816  		return -EINVAL;
1817  
1818  	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1819  	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1820  		return -EINVAL;
1821  	}
1822  
1823  	value_size = bpf_map_value_size(map);
1824  
1825  	max_count = attr->batch.count;
1826  	if (!max_count)
1827  		return 0;
1828  
1829  	if (put_user(0, &uattr->batch.count))
1830  		return -EFAULT;
1831  
1832  	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1833  	if (!key)
1834  		return -ENOMEM;
1835  
1836  	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1837  	if (!value) {
1838  		kvfree(key);
1839  		return -ENOMEM;
1840  	}
1841  
1842  	for (cp = 0; cp < max_count; cp++) {
1843  		err = -EFAULT;
1844  		if (copy_from_user(key, keys + cp * map->key_size,
1845  		    map->key_size) ||
1846  		    copy_from_user(value, values + cp * value_size, value_size))
1847  			break;
1848  
1849  		err = bpf_map_update_value(map, map_file, key, value,
1850  					   attr->batch.elem_flags);
1851  
1852  		if (err)
1853  			break;
1854  		cond_resched();
1855  	}
1856  
1857  	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1858  		err = -EFAULT;
1859  
1860  	kvfree(value);
1861  	kvfree(key);
1862  
1863  	return err;
1864  }
1865  
1866  #define MAP_LOOKUP_RETRIES 3
1867  
1868  int generic_map_lookup_batch(struct bpf_map *map,
1869  				    const union bpf_attr *attr,
1870  				    union bpf_attr __user *uattr)
1871  {
1872  	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1873  	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1874  	void __user *values = u64_to_user_ptr(attr->batch.values);
1875  	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1876  	void *buf, *buf_prevkey, *prev_key, *key, *value;
1877  	int err, retry = MAP_LOOKUP_RETRIES;
1878  	u32 value_size, cp, max_count;
1879  
1880  	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1881  		return -EINVAL;
1882  
1883  	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1884  	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1885  		return -EINVAL;
1886  
1887  	value_size = bpf_map_value_size(map);
1888  
1889  	max_count = attr->batch.count;
1890  	if (!max_count)
1891  		return 0;
1892  
1893  	if (put_user(0, &uattr->batch.count))
1894  		return -EFAULT;
1895  
1896  	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1897  	if (!buf_prevkey)
1898  		return -ENOMEM;
1899  
1900  	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1901  	if (!buf) {
1902  		kvfree(buf_prevkey);
1903  		return -ENOMEM;
1904  	}
1905  
1906  	err = -EFAULT;
1907  	prev_key = NULL;
1908  	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1909  		goto free_buf;
1910  	key = buf;
1911  	value = key + map->key_size;
1912  	if (ubatch)
1913  		prev_key = buf_prevkey;
1914  
1915  	for (cp = 0; cp < max_count;) {
1916  		rcu_read_lock();
1917  		err = map->ops->map_get_next_key(map, prev_key, key);
1918  		rcu_read_unlock();
1919  		if (err)
1920  			break;
1921  		err = bpf_map_copy_value(map, key, value,
1922  					 attr->batch.elem_flags);
1923  
1924  		if (err == -ENOENT) {
1925  			if (retry) {
1926  				retry--;
1927  				continue;
1928  			}
1929  			err = -EINTR;
1930  			break;
1931  		}
1932  
1933  		if (err)
1934  			goto free_buf;
1935  
1936  		if (copy_to_user(keys + cp * map->key_size, key,
1937  				 map->key_size)) {
1938  			err = -EFAULT;
1939  			goto free_buf;
1940  		}
1941  		if (copy_to_user(values + cp * value_size, value, value_size)) {
1942  			err = -EFAULT;
1943  			goto free_buf;
1944  		}
1945  
1946  		if (!prev_key)
1947  			prev_key = buf_prevkey;
1948  
1949  		swap(prev_key, key);
1950  		retry = MAP_LOOKUP_RETRIES;
1951  		cp++;
1952  		cond_resched();
1953  	}
1954  
1955  	if (err == -EFAULT)
1956  		goto free_buf;
1957  
1958  	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1959  		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1960  		err = -EFAULT;
1961  
1962  free_buf:
1963  	kvfree(buf_prevkey);
1964  	kvfree(buf);
1965  	return err;
1966  }
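
/*
 * Editor's note -- illustrative sketch only, not part of the kernel build:
 * how user space is expected to drive the batch lookup above. in_batch and
 * out_batch carry the resume cursor between calls and count returns the
 * number of elements actually copied; the buffer names are hypothetical.
 *
 *	union bpf_attr attr = {};
 *	long ret;
 *
 *	attr.batch.map_fd    = map_fd;
 *	attr.batch.keys      = (__u64)(unsigned long)keys_buf;
 *	attr.batch.values    = (__u64)(unsigned long)vals_buf;
 *	attr.batch.out_batch = (__u64)(unsigned long)cursor_buf;
 *	attr.batch.count     = batch_size;
 *	ret = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *	... on success attr.batch.count holds the number of elements copied;
 *	... pass cursor_buf back via attr.batch.in_batch on the next call, and
 *	... stop when the syscall fails with errno == ENOENT (end of the map).
 */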
1967  
1968  #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1969  
1970  static int map_lookup_and_delete_elem(union bpf_attr *attr)
1971  {
1972  	void __user *ukey = u64_to_user_ptr(attr->key);
1973  	void __user *uvalue = u64_to_user_ptr(attr->value);
1974  	struct bpf_map *map;
1975  	void *key, *value;
1976  	u32 value_size;
1977  	int err;
1978  
1979  	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1980  		return -EINVAL;
1981  
1982  	if (attr->flags & ~BPF_F_LOCK)
1983  		return -EINVAL;
1984  
1985  	CLASS(fd, f)(attr->map_fd);
1986  	map = __bpf_map_get(f);
1987  	if (IS_ERR(map))
1988  		return PTR_ERR(map);
1989  	bpf_map_write_active_inc(map);
1990  	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1991  	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1992  		err = -EPERM;
1993  		goto err_put;
1994  	}
1995  
1996  	if (attr->flags &&
1997  	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
1998  	     map->map_type == BPF_MAP_TYPE_STACK)) {
1999  		err = -EINVAL;
2000  		goto err_put;
2001  	}
2002  
2003  	if ((attr->flags & BPF_F_LOCK) &&
2004  	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
2005  		err = -EINVAL;
2006  		goto err_put;
2007  	}
2008  
2009  	key = __bpf_copy_key(ukey, map->key_size);
2010  	if (IS_ERR(key)) {
2011  		err = PTR_ERR(key);
2012  		goto err_put;
2013  	}
2014  
2015  	value_size = bpf_map_value_size(map);
2016  
2017  	err = -ENOMEM;
2018  	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
2019  	if (!value)
2020  		goto free_key;
2021  
2022  	err = -ENOTSUPP;
2023  	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
2024  	    map->map_type == BPF_MAP_TYPE_STACK) {
2025  		err = map->ops->map_pop_elem(map, value);
2026  	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
2027  		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2028  		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
2029  		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2030  		if (!bpf_map_is_offloaded(map)) {
2031  			bpf_disable_instrumentation();
2032  			rcu_read_lock();
2033  			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
2034  			rcu_read_unlock();
2035  			bpf_enable_instrumentation();
2036  		}
2037  	}
2038  
2039  	if (err)
2040  		goto free_value;
2041  
2042  	if (copy_to_user(uvalue, value, value_size) != 0) {
2043  		err = -EFAULT;
2044  		goto free_value;
2045  	}
2046  
2047  	err = 0;
2048  
2049  free_value:
2050  	kvfree(value);
2051  free_key:
2052  	kvfree(key);
2053  err_put:
2054  	bpf_map_write_active_dec(map);
2055  	return err;
2056  }
2057  
2058  #define BPF_MAP_FREEZE_LAST_FIELD map_fd
2059  
2060  static int map_freeze(const union bpf_attr *attr)
2061  {
2062  	int err = 0;
2063  	struct bpf_map *map;
2064  
2065  	if (CHECK_ATTR(BPF_MAP_FREEZE))
2066  		return -EINVAL;
2067  
2068  	CLASS(fd, f)(attr->map_fd);
2069  	map = __bpf_map_get(f);
2070  	if (IS_ERR(map))
2071  		return PTR_ERR(map);
2072  
2073  	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
2074  		return -ENOTSUPP;
2075  
2076  	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
2077  		return -EPERM;
2078  
2079  	mutex_lock(&map->freeze_mutex);
2080  	if (bpf_map_write_active(map)) {
2081  		err = -EBUSY;
2082  		goto err_put;
2083  	}
2084  	if (READ_ONCE(map->frozen)) {
2085  		err = -EBUSY;
2086  		goto err_put;
2087  	}
2088  
2089  	WRITE_ONCE(map->frozen, true);
2090  err_put:
2091  	mutex_unlock(&map->freeze_mutex);
2092  	return err;
2093  }
2094  
2095  static const struct bpf_prog_ops * const bpf_prog_types[] = {
2096  #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2097  	[_id] = & _name ## _prog_ops,
2098  #define BPF_MAP_TYPE(_id, _ops)
2099  #define BPF_LINK_TYPE(_id, _name)
2100  #include <linux/bpf_types.h>
2101  #undef BPF_PROG_TYPE
2102  #undef BPF_MAP_TYPE
2103  #undef BPF_LINK_TYPE
2104  };
2105  
2106  static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2107  {
2108  	const struct bpf_prog_ops *ops;
2109  
2110  	if (type >= ARRAY_SIZE(bpf_prog_types))
2111  		return -EINVAL;
2112  	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2113  	ops = bpf_prog_types[type];
2114  	if (!ops)
2115  		return -EINVAL;
2116  
2117  	if (!bpf_prog_is_offloaded(prog->aux))
2118  		prog->aux->ops = ops;
2119  	else
2120  		prog->aux->ops = &bpf_offload_prog_ops;
2121  	prog->type = type;
2122  	return 0;
2123  }
2124  
2125  enum bpf_audit {
2126  	BPF_AUDIT_LOAD,
2127  	BPF_AUDIT_UNLOAD,
2128  	BPF_AUDIT_MAX,
2129  };
2130  
2131  static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2132  	[BPF_AUDIT_LOAD]   = "LOAD",
2133  	[BPF_AUDIT_UNLOAD] = "UNLOAD",
2134  };
2135  
2136  static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2137  {
2138  	struct audit_context *ctx = NULL;
2139  	struct audit_buffer *ab;
2140  
2141  	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2142  		return;
2143  	if (audit_enabled == AUDIT_OFF)
2144  		return;
2145  	if (!in_irq() && !irqs_disabled())
2146  		ctx = audit_context();
2147  	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2148  	if (unlikely(!ab))
2149  		return;
2150  	audit_log_format(ab, "prog-id=%u op=%s",
2151  			 prog->aux->id, bpf_audit_str[op]);
2152  	audit_log_end(ab);
2153  }
2154  
2155  static int bpf_prog_alloc_id(struct bpf_prog *prog)
2156  {
2157  	int id;
2158  
2159  	idr_preload(GFP_KERNEL);
2160  	spin_lock_bh(&prog_idr_lock);
2161  	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2162  	if (id > 0)
2163  		prog->aux->id = id;
2164  	spin_unlock_bh(&prog_idr_lock);
2165  	idr_preload_end();
2166  
2167  	/* id is in [1, INT_MAX) */
2168  	if (WARN_ON_ONCE(!id))
2169  		return -ENOSPC;
2170  
2171  	return id > 0 ? 0 : id;
2172  }
2173  
2174  void bpf_prog_free_id(struct bpf_prog *prog)
2175  {
2176  	unsigned long flags;
2177  
2178  	/* cBPF to eBPF migrations are currently not in the idr store.
2179  	 * Offloaded programs are removed from the store when their device
2180  	 * disappears - even if someone grabs an fd to them they are unusable,
2181  	 * simply waiting for refcnt to drop to be freed.
2182  	 */
2183  	if (!prog->aux->id)
2184  		return;
2185  
2186  	spin_lock_irqsave(&prog_idr_lock, flags);
2187  	idr_remove(&prog_idr, prog->aux->id);
2188  	prog->aux->id = 0;
2189  	spin_unlock_irqrestore(&prog_idr_lock, flags);
2190  }
2191  
2192  static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2193  {
2194  	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2195  
2196  	kvfree(aux->func_info);
2197  	kfree(aux->func_info_aux);
2198  	free_uid(aux->user);
2199  	security_bpf_prog_free(aux->prog);
2200  	bpf_prog_free(aux->prog);
2201  }
2202  
2203  static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2204  {
2205  	bpf_prog_kallsyms_del_all(prog);
2206  	btf_put(prog->aux->btf);
2207  	module_put(prog->aux->mod);
2208  	kvfree(prog->aux->jited_linfo);
2209  	kvfree(prog->aux->linfo);
2210  	kfree(prog->aux->kfunc_tab);
2211  	if (prog->aux->attach_btf)
2212  		btf_put(prog->aux->attach_btf);
2213  
2214  	if (deferred) {
2215  		if (prog->sleepable)
2216  			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2217  		else
2218  			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2219  	} else {
2220  		__bpf_prog_put_rcu(&prog->aux->rcu);
2221  	}
2222  }
2223  
2224  static void bpf_prog_put_deferred(struct work_struct *work)
2225  {
2226  	struct bpf_prog_aux *aux;
2227  	struct bpf_prog *prog;
2228  
2229  	aux = container_of(work, struct bpf_prog_aux, work);
2230  	prog = aux->prog;
2231  	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2232  	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2233  	bpf_prog_free_id(prog);
2234  	__bpf_prog_put_noref(prog, true);
2235  }
2236  
2237  static void __bpf_prog_put(struct bpf_prog *prog)
2238  {
2239  	struct bpf_prog_aux *aux = prog->aux;
2240  
2241  	if (atomic64_dec_and_test(&aux->refcnt)) {
2242  		if (in_irq() || irqs_disabled()) {
2243  			INIT_WORK(&aux->work, bpf_prog_put_deferred);
2244  			schedule_work(&aux->work);
2245  		} else {
2246  			bpf_prog_put_deferred(&aux->work);
2247  		}
2248  	}
2249  }
2250  
2251  void bpf_prog_put(struct bpf_prog *prog)
2252  {
2253  	__bpf_prog_put(prog);
2254  }
2255  EXPORT_SYMBOL_GPL(bpf_prog_put);
2256  
2257  static int bpf_prog_release(struct inode *inode, struct file *filp)
2258  {
2259  	struct bpf_prog *prog = filp->private_data;
2260  
2261  	bpf_prog_put(prog);
2262  	return 0;
2263  }
2264  
2265  struct bpf_prog_kstats {
2266  	u64 nsecs;
2267  	u64 cnt;
2268  	u64 misses;
2269  };
2270  
2271  void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2272  {
2273  	struct bpf_prog_stats *stats;
2274  	unsigned int flags;
2275  
2276  	stats = this_cpu_ptr(prog->stats);
2277  	flags = u64_stats_update_begin_irqsave(&stats->syncp);
2278  	u64_stats_inc(&stats->misses);
2279  	u64_stats_update_end_irqrestore(&stats->syncp, flags);
2280  }
2281  
2282  static void bpf_prog_get_stats(const struct bpf_prog *prog,
2283  			       struct bpf_prog_kstats *stats)
2284  {
2285  	u64 nsecs = 0, cnt = 0, misses = 0;
2286  	int cpu;
2287  
2288  	for_each_possible_cpu(cpu) {
2289  		const struct bpf_prog_stats *st;
2290  		unsigned int start;
2291  		u64 tnsecs, tcnt, tmisses;
2292  
2293  		st = per_cpu_ptr(prog->stats, cpu);
2294  		do {
2295  			start = u64_stats_fetch_begin(&st->syncp);
2296  			tnsecs = u64_stats_read(&st->nsecs);
2297  			tcnt = u64_stats_read(&st->cnt);
2298  			tmisses = u64_stats_read(&st->misses);
2299  		} while (u64_stats_fetch_retry(&st->syncp, start));
2300  		nsecs += tnsecs;
2301  		cnt += tcnt;
2302  		misses += tmisses;
2303  	}
2304  	stats->nsecs = nsecs;
2305  	stats->cnt = cnt;
2306  	stats->misses = misses;
2307  }
2308  
2309  #ifdef CONFIG_PROC_FS
2310  static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2311  {
2312  	const struct bpf_prog *prog = filp->private_data;
2313  	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2314  	struct bpf_prog_kstats stats;
2315  
2316  	bpf_prog_get_stats(prog, &stats);
2317  	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2318  	seq_printf(m,
2319  		   "prog_type:\t%u\n"
2320  		   "prog_jited:\t%u\n"
2321  		   "prog_tag:\t%s\n"
2322  		   "memlock:\t%llu\n"
2323  		   "prog_id:\t%u\n"
2324  		   "run_time_ns:\t%llu\n"
2325  		   "run_cnt:\t%llu\n"
2326  		   "recursion_misses:\t%llu\n"
2327  		   "verified_insns:\t%u\n",
2328  		   prog->type,
2329  		   prog->jited,
2330  		   prog_tag,
2331  		   prog->pages * 1ULL << PAGE_SHIFT,
2332  		   prog->aux->id,
2333  		   stats.nsecs,
2334  		   stats.cnt,
2335  		   stats.misses,
2336  		   prog->aux->verified_insns);
2337  }
2338  #endif
2339  
2340  const struct file_operations bpf_prog_fops = {
2341  #ifdef CONFIG_PROC_FS
2342  	.show_fdinfo	= bpf_prog_show_fdinfo,
2343  #endif
2344  	.release	= bpf_prog_release,
2345  	.read		= bpf_dummy_read,
2346  	.write		= bpf_dummy_write,
2347  };
2348  
2349  int bpf_prog_new_fd(struct bpf_prog *prog)
2350  {
2351  	int ret;
2352  
2353  	ret = security_bpf_prog(prog);
2354  	if (ret < 0)
2355  		return ret;
2356  
2357  	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2358  				O_RDWR | O_CLOEXEC);
2359  }
2360  
2361  void bpf_prog_add(struct bpf_prog *prog, int i)
2362  {
2363  	atomic64_add(i, &prog->aux->refcnt);
2364  }
2365  EXPORT_SYMBOL_GPL(bpf_prog_add);
2366  
2367  void bpf_prog_sub(struct bpf_prog *prog, int i)
2368  {
2369  	/* Only to be used for undoing previous bpf_prog_add() in some
2370  	 * error path. We still know that another entity in our call
2371  	 * path holds a reference to the program, thus atomic_sub() can
2372  	 * be safely used in such cases!
2373  	 */
2374  	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2375  }
2376  EXPORT_SYMBOL_GPL(bpf_prog_sub);
2377  
2378  void bpf_prog_inc(struct bpf_prog *prog)
2379  {
2380  	atomic64_inc(&prog->aux->refcnt);
2381  }
2382  EXPORT_SYMBOL_GPL(bpf_prog_inc);
2383  
2384  /* prog_idr_lock should have been held */
2385  struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2386  {
2387  	int refold;
2388  
2389  	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2390  
2391  	if (!refold)
2392  		return ERR_PTR(-ENOENT);
2393  
2394  	return prog;
2395  }
2396  EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2397  
2398  bool bpf_prog_get_ok(struct bpf_prog *prog,
2399  			    enum bpf_prog_type *attach_type, bool attach_drv)
2400  {
2401  	/* not an attachment, just a refcount inc, always allow */
2402  	if (!attach_type)
2403  		return true;
2404  
2405  	if (prog->type != *attach_type)
2406  		return false;
2407  	if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2408  		return false;
2409  
2410  	return true;
2411  }
2412  
2413  static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2414  				       bool attach_drv)
2415  {
2416  	CLASS(fd, f)(ufd);
2417  	struct bpf_prog *prog;
2418  
2419  	if (fd_empty(f))
2420  		return ERR_PTR(-EBADF);
2421  	if (fd_file(f)->f_op != &bpf_prog_fops)
2422  		return ERR_PTR(-EINVAL);
2423  
2424  	prog = fd_file(f)->private_data;
2425  	if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
2426  		return ERR_PTR(-EINVAL);
2427  
2428  	bpf_prog_inc(prog);
2429  	return prog;
2430  }
2431  
2432  struct bpf_prog *bpf_prog_get(u32 ufd)
2433  {
2434  	return __bpf_prog_get(ufd, NULL, false);
2435  }
2436  
2437  struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2438  				       bool attach_drv)
2439  {
2440  	return __bpf_prog_get(ufd, &type, attach_drv);
2441  }
2442  EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2443  
2444  /* Initially all BPF programs could be loaded w/o specifying
2445   * expected_attach_type. Later for some of them specifying expected_attach_type
2446   * at load time became required so that program could be validated properly.
2447   * Programs of types that are allowed to be loaded both w/ and w/o (for
2448   * backward compatibility) expected_attach_type should have the default attach
2449   * type assigned to expected_attach_type for the latter case, so that it can be
2450   * validated later at attach time.
2451   *
2452   * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2453   * prog type requires it but has some attach types that have to be backward
2454   * compatible.
2455   */
2456  static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2457  {
2458  	switch (attr->prog_type) {
2459  	case BPF_PROG_TYPE_CGROUP_SOCK:
2460  		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2461  		 * exist so checking for non-zero is the way to go here.
2462  		 */
2463  		if (!attr->expected_attach_type)
2464  			attr->expected_attach_type =
2465  				BPF_CGROUP_INET_SOCK_CREATE;
2466  		break;
2467  	case BPF_PROG_TYPE_SK_REUSEPORT:
2468  		if (!attr->expected_attach_type)
2469  			attr->expected_attach_type =
2470  				BPF_SK_REUSEPORT_SELECT;
2471  		break;
2472  	}
2473  }
2474  
2475  static int
2476  bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2477  			   enum bpf_attach_type expected_attach_type,
2478  			   struct btf *attach_btf, u32 btf_id,
2479  			   struct bpf_prog *dst_prog)
2480  {
2481  	if (btf_id) {
2482  		if (btf_id > BTF_MAX_TYPE)
2483  			return -EINVAL;
2484  
2485  		if (!attach_btf && !dst_prog)
2486  			return -EINVAL;
2487  
2488  		switch (prog_type) {
2489  		case BPF_PROG_TYPE_TRACING:
2490  		case BPF_PROG_TYPE_LSM:
2491  		case BPF_PROG_TYPE_STRUCT_OPS:
2492  		case BPF_PROG_TYPE_EXT:
2493  			break;
2494  		default:
2495  			return -EINVAL;
2496  		}
2497  	}
2498  
2499  	if (attach_btf && (!btf_id || dst_prog))
2500  		return -EINVAL;
2501  
2502  	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2503  	    prog_type != BPF_PROG_TYPE_EXT)
2504  		return -EINVAL;
2505  
2506  	switch (prog_type) {
2507  	case BPF_PROG_TYPE_CGROUP_SOCK:
2508  		switch (expected_attach_type) {
2509  		case BPF_CGROUP_INET_SOCK_CREATE:
2510  		case BPF_CGROUP_INET_SOCK_RELEASE:
2511  		case BPF_CGROUP_INET4_POST_BIND:
2512  		case BPF_CGROUP_INET6_POST_BIND:
2513  			return 0;
2514  		default:
2515  			return -EINVAL;
2516  		}
2517  	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2518  		switch (expected_attach_type) {
2519  		case BPF_CGROUP_INET4_BIND:
2520  		case BPF_CGROUP_INET6_BIND:
2521  		case BPF_CGROUP_INET4_CONNECT:
2522  		case BPF_CGROUP_INET6_CONNECT:
2523  		case BPF_CGROUP_UNIX_CONNECT:
2524  		case BPF_CGROUP_INET4_GETPEERNAME:
2525  		case BPF_CGROUP_INET6_GETPEERNAME:
2526  		case BPF_CGROUP_UNIX_GETPEERNAME:
2527  		case BPF_CGROUP_INET4_GETSOCKNAME:
2528  		case BPF_CGROUP_INET6_GETSOCKNAME:
2529  		case BPF_CGROUP_UNIX_GETSOCKNAME:
2530  		case BPF_CGROUP_UDP4_SENDMSG:
2531  		case BPF_CGROUP_UDP6_SENDMSG:
2532  		case BPF_CGROUP_UNIX_SENDMSG:
2533  		case BPF_CGROUP_UDP4_RECVMSG:
2534  		case BPF_CGROUP_UDP6_RECVMSG:
2535  		case BPF_CGROUP_UNIX_RECVMSG:
2536  			return 0;
2537  		default:
2538  			return -EINVAL;
2539  		}
2540  	case BPF_PROG_TYPE_CGROUP_SKB:
2541  		switch (expected_attach_type) {
2542  		case BPF_CGROUP_INET_INGRESS:
2543  		case BPF_CGROUP_INET_EGRESS:
2544  			return 0;
2545  		default:
2546  			return -EINVAL;
2547  		}
2548  	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2549  		switch (expected_attach_type) {
2550  		case BPF_CGROUP_SETSOCKOPT:
2551  		case BPF_CGROUP_GETSOCKOPT:
2552  			return 0;
2553  		default:
2554  			return -EINVAL;
2555  		}
2556  	case BPF_PROG_TYPE_SK_LOOKUP:
2557  		if (expected_attach_type == BPF_SK_LOOKUP)
2558  			return 0;
2559  		return -EINVAL;
2560  	case BPF_PROG_TYPE_SK_REUSEPORT:
2561  		switch (expected_attach_type) {
2562  		case BPF_SK_REUSEPORT_SELECT:
2563  		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2564  			return 0;
2565  		default:
2566  			return -EINVAL;
2567  		}
2568  	case BPF_PROG_TYPE_NETFILTER:
2569  		if (expected_attach_type == BPF_NETFILTER)
2570  			return 0;
2571  		return -EINVAL;
2572  	case BPF_PROG_TYPE_SYSCALL:
2573  	case BPF_PROG_TYPE_EXT:
2574  		if (expected_attach_type)
2575  			return -EINVAL;
2576  		fallthrough;
2577  	default:
2578  		return 0;
2579  	}
2580  }
2581  
2582  static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2583  {
2584  	switch (prog_type) {
2585  	case BPF_PROG_TYPE_SCHED_CLS:
2586  	case BPF_PROG_TYPE_SCHED_ACT:
2587  	case BPF_PROG_TYPE_XDP:
2588  	case BPF_PROG_TYPE_LWT_IN:
2589  	case BPF_PROG_TYPE_LWT_OUT:
2590  	case BPF_PROG_TYPE_LWT_XMIT:
2591  	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2592  	case BPF_PROG_TYPE_SK_SKB:
2593  	case BPF_PROG_TYPE_SK_MSG:
2594  	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2595  	case BPF_PROG_TYPE_CGROUP_DEVICE:
2596  	case BPF_PROG_TYPE_CGROUP_SOCK:
2597  	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2598  	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2599  	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2600  	case BPF_PROG_TYPE_SOCK_OPS:
2601  	case BPF_PROG_TYPE_EXT: /* extends any prog */
2602  	case BPF_PROG_TYPE_NETFILTER:
2603  		return true;
2604  	case BPF_PROG_TYPE_CGROUP_SKB:
2605  		/* always unpriv */
2606  	case BPF_PROG_TYPE_SK_REUSEPORT:
2607  		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2608  	default:
2609  		return false;
2610  	}
2611  }
2612  
2613  static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2614  {
2615  	switch (prog_type) {
2616  	case BPF_PROG_TYPE_KPROBE:
2617  	case BPF_PROG_TYPE_TRACEPOINT:
2618  	case BPF_PROG_TYPE_PERF_EVENT:
2619  	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2620  	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2621  	case BPF_PROG_TYPE_TRACING:
2622  	case BPF_PROG_TYPE_LSM:
2623  	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2624  	case BPF_PROG_TYPE_EXT: /* extends any prog */
2625  		return true;
2626  	default:
2627  		return false;
2628  	}
2629  }
2630  
2631  /* last field in 'union bpf_attr' used by this command */
2632  #define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
2633  
2634  static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2635  {
2636  	enum bpf_prog_type type = attr->prog_type;
2637  	struct bpf_prog *prog, *dst_prog = NULL;
2638  	struct btf *attach_btf = NULL;
2639  	struct bpf_token *token = NULL;
2640  	bool bpf_cap;
2641  	int err;
2642  	char license[128];
2643  
2644  	if (CHECK_ATTR(BPF_PROG_LOAD))
2645  		return -EINVAL;
2646  
2647  	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2648  				 BPF_F_ANY_ALIGNMENT |
2649  				 BPF_F_TEST_STATE_FREQ |
2650  				 BPF_F_SLEEPABLE |
2651  				 BPF_F_TEST_RND_HI32 |
2652  				 BPF_F_XDP_HAS_FRAGS |
2653  				 BPF_F_XDP_DEV_BOUND_ONLY |
2654  				 BPF_F_TEST_REG_INVARIANTS |
2655  				 BPF_F_TOKEN_FD))
2656  		return -EINVAL;
2657  
2658  	bpf_prog_load_fixup_attach_type(attr);
2659  
2660  	if (attr->prog_flags & BPF_F_TOKEN_FD) {
2661  		token = bpf_token_get_from_fd(attr->prog_token_fd);
2662  		if (IS_ERR(token))
2663  			return PTR_ERR(token);
2664  		/* if current token doesn't grant prog loading permissions,
2665  		 * then we can't use this token, so ignore it and rely on
2666  		 * system-wide capabilities checks
2667  		 */
2668  		if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
2669  		    !bpf_token_allow_prog_type(token, attr->prog_type,
2670  					       attr->expected_attach_type)) {
2671  			bpf_token_put(token);
2672  			token = NULL;
2673  		}
2674  	}
2675  
2676  	bpf_cap = bpf_token_capable(token, CAP_BPF);
2677  	err = -EPERM;
2678  
2679  	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2680  	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2681  	    !bpf_cap)
2682  		goto put_token;
2683  
2684  	/* Intent here is for unprivileged_bpf_disabled to block BPF program
2685  	 * creation for unprivileged users; other actions depend
2686  	 * on fd availability and access to bpffs, so are dependent on
2687  	 * object creation success. Even with unprivileged BPF disabled,
2688  	 * capability checks are still carried out for these
2689  	 * and other operations.
2690  	 */
2691  	if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
2692  		goto put_token;
2693  
2694  	if (attr->insn_cnt == 0 ||
2695  	    attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
2696  		err = -E2BIG;
2697  		goto put_token;
2698  	}
2699  	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2700  	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2701  	    !bpf_cap)
2702  		goto put_token;
2703  
2704  	if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
2705  		goto put_token;
2706  	if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
2707  		goto put_token;
2708  
2709  	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2710  	 * or btf, we need to check which one it is
2711  	 */
2712  	if (attr->attach_prog_fd) {
2713  		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2714  		if (IS_ERR(dst_prog)) {
2715  			dst_prog = NULL;
2716  			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2717  			if (IS_ERR(attach_btf)) {
2718  				err = -EINVAL;
2719  				goto put_token;
2720  			}
2721  			if (!btf_is_kernel(attach_btf)) {
2722  				/* attaching through specifying bpf_prog's BTF
2723  				 * objects directly might be supported eventually
2724  				 */
2725  				btf_put(attach_btf);
2726  				err = -ENOTSUPP;
2727  				goto put_token;
2728  			}
2729  		}
2730  	} else if (attr->attach_btf_id) {
2731  		/* fall back to vmlinux BTF, if BTF type ID is specified */
2732  		attach_btf = bpf_get_btf_vmlinux();
2733  		if (IS_ERR(attach_btf)) {
2734  			err = PTR_ERR(attach_btf);
2735  			goto put_token;
2736  		}
2737  		if (!attach_btf) {
2738  			err = -EINVAL;
2739  			goto put_token;
2740  		}
2741  		btf_get(attach_btf);
2742  	}
2743  
2744  	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2745  				       attach_btf, attr->attach_btf_id,
2746  				       dst_prog)) {
2747  		if (dst_prog)
2748  			bpf_prog_put(dst_prog);
2749  		if (attach_btf)
2750  			btf_put(attach_btf);
2751  		err = -EINVAL;
2752  		goto put_token;
2753  	}
2754  
2755  	/* plain bpf_prog allocation */
2756  	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2757  	if (!prog) {
2758  		if (dst_prog)
2759  			bpf_prog_put(dst_prog);
2760  		if (attach_btf)
2761  			btf_put(attach_btf);
2762  		err = -EINVAL;
2763  		goto put_token;
2764  	}
2765  
2766  	prog->expected_attach_type = attr->expected_attach_type;
2767  	prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
2768  	prog->aux->attach_btf = attach_btf;
2769  	prog->aux->attach_btf_id = attr->attach_btf_id;
2770  	prog->aux->dst_prog = dst_prog;
2771  	prog->aux->dev_bound = !!attr->prog_ifindex;
2772  	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2773  
2774  	/* move token into prog->aux, reuse taken refcnt */
2775  	prog->aux->token = token;
2776  	token = NULL;
2777  
2778  	prog->aux->user = get_current_user();
2779  	prog->len = attr->insn_cnt;
2780  
2781  	err = -EFAULT;
2782  	if (copy_from_bpfptr(prog->insns,
2783  			     make_bpfptr(attr->insns, uattr.is_kernel),
2784  			     bpf_prog_insn_size(prog)) != 0)
2785  		goto free_prog;
2786  	/* copy eBPF program license from user space */
2787  	if (strncpy_from_bpfptr(license,
2788  				make_bpfptr(attr->license, uattr.is_kernel),
2789  				sizeof(license) - 1) < 0)
2790  		goto free_prog;
2791  	license[sizeof(license) - 1] = 0;
2792  
2793  	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2794  	prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2795  
2796  	prog->orig_prog = NULL;
2797  	prog->jited = 0;
2798  
2799  	atomic64_set(&prog->aux->refcnt, 1);
2800  
2801  	if (bpf_prog_is_dev_bound(prog->aux)) {
2802  		err = bpf_prog_dev_bound_init(prog, attr);
2803  		if (err)
2804  			goto free_prog;
2805  	}
2806  
2807  	if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2808  	    bpf_prog_is_dev_bound(dst_prog->aux)) {
2809  		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2810  		if (err)
2811  			goto free_prog;
2812  	}
2813  
2814  	/*
2815  	 * Bookkeeping for managing the program attachment chain.
2816  	 *
2817  	 * It might be tempting to set the attach_tracing_prog flag at attachment
2818  	 * time, but this would not prevent loading a bunch of tracing programs
2819  	 * first and then attaching them to one another.
2820  	 *
2821  	 * The flag attach_tracing_prog is set for the whole program lifecycle, and
2822  	 * doesn't have to be cleared in bpf_tracing_link_release, since tracing
2823  	 * programs cannot change attachment target.
2824  	 */
2825  	if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2826  	    dst_prog->type == BPF_PROG_TYPE_TRACING) {
2827  		prog->aux->attach_tracing_prog = true;
2828  	}
2829  
2830  	/* find program type: socket_filter vs tracing_filter */
2831  	err = find_prog_type(type, prog);
2832  	if (err < 0)
2833  		goto free_prog;
2834  
2835  	prog->aux->load_time = ktime_get_boottime_ns();
2836  	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2837  			       sizeof(attr->prog_name));
2838  	if (err < 0)
2839  		goto free_prog;
2840  
2841  	err = security_bpf_prog_load(prog, attr, token);
2842  	if (err)
2843  		goto free_prog_sec;
2844  
2845  	/* run eBPF verifier */
2846  	err = bpf_check(&prog, attr, uattr, uattr_size);
2847  	if (err < 0)
2848  		goto free_used_maps;
2849  
2850  	prog = bpf_prog_select_runtime(prog, &err);
2851  	if (err < 0)
2852  		goto free_used_maps;
2853  
2854  	err = bpf_prog_alloc_id(prog);
2855  	if (err)
2856  		goto free_used_maps;
2857  
2858  	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2859  	 * effectively publicly exposed. However, retrieving via
2860  	 * bpf_prog_get_fd_by_id() will take another reference,
2861  	 * therefore it cannot be gone underneath us.
2862  	 *
2863  	 * Only for the time /after/ successful bpf_prog_new_fd()
2864  	 * and before returning to userspace, we might just hold
2865  	 * one reference and any parallel close on that fd could
2866  	 * rip everything out. Hence, below notifications must
2867  	 * happen before bpf_prog_new_fd().
2868  	 *
2869  	 * Also, any failure handling from this point onwards must
2870  	 * be using bpf_prog_put() given the program is exposed.
2871  	 */
2872  	bpf_prog_kallsyms_add(prog);
2873  	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2874  	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2875  
2876  	err = bpf_prog_new_fd(prog);
2877  	if (err < 0)
2878  		bpf_prog_put(prog);
2879  	return err;
2880  
2881  free_used_maps:
2882  	/* In case we have subprogs, we need to wait for a grace
2883  	 * period before we can tear down JIT memory since symbols
2884  	 * are already exposed under kallsyms.
2885  	 */
2886  	__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
2887  	return err;
2888  
2889  free_prog_sec:
2890  	security_bpf_prog_free(prog);
2891  free_prog:
2892  	free_uid(prog->aux->user);
2893  	if (prog->aux->attach_btf)
2894  		btf_put(prog->aux->attach_btf);
2895  	bpf_prog_free(prog);
2896  put_token:
2897  	bpf_token_put(token);
2898  	return err;
2899  }
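
/*
 * Editor's note -- illustrative sketch only, not part of the kernel build:
 * a minimal user-space BPF_PROG_LOAD request handled by the function above.
 * The attr fields (prog_type, insns, insn_cnt, license) are the real union
 * bpf_attr members; the insn-building macros are assumed to come from a
 * tools-style <linux/filter.h> helper header.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {};
 *	int prog_fd;
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */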
2900  
2901  #define BPF_OBJ_LAST_FIELD path_fd
2902  
2903  static int bpf_obj_pin(const union bpf_attr *attr)
2904  {
2905  	int path_fd;
2906  
2907  	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2908  		return -EINVAL;
2909  
2910  	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2911  	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2912  		return -EINVAL;
2913  
2914  	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2915  	return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2916  				u64_to_user_ptr(attr->pathname));
2917  }
2918  
2919  static int bpf_obj_get(const union bpf_attr *attr)
2920  {
2921  	int path_fd;
2922  
2923  	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2924  	    attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2925  		return -EINVAL;
2926  
2927  	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2928  	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2929  		return -EINVAL;
2930  
2931  	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2932  	return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2933  				attr->file_flags);
2934  }
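
/*
 * Editor's note -- illustrative sketch only, not part of the kernel build:
 * pinning an object and re-opening it through the two commands above. The
 * attr fields (bpf_fd, pathname) are the real union bpf_attr members; the
 * "/sys/fs/bpf/my_map" path and map_fd are hypothetical.
 *
 *	union bpf_attr attr = {};
 *	int new_fd;
 *	long ret;
 *
 *	attr.bpf_fd   = map_fd;
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map";
 *	ret = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map";
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */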
2935  
2936  void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2937  		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2938  {
2939  	WARN_ON(ops->dealloc && ops->dealloc_deferred);
2940  	atomic64_set(&link->refcnt, 1);
2941  	link->type = type;
2942  	link->id = 0;
2943  	link->ops = ops;
2944  	link->prog = prog;
2945  }
2946  
2947  static void bpf_link_free_id(int id)
2948  {
2949  	if (!id)
2950  		return;
2951  
2952  	spin_lock_bh(&link_idr_lock);
2953  	idr_remove(&link_idr, id);
2954  	spin_unlock_bh(&link_idr_lock);
2955  }
2956  
2957  /* Clean up bpf_link and corresponding anon_inode file and FD. After
2958   * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2959   * anon_inode's release() call. This helper marks bpf_link as
2960   * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2961   * is not decremented; it is the responsibility of the calling code that failed
2962   * to complete bpf_link initialization.
2963   * This helper eventually calls link's dealloc callback, but does not call
2964   * link's release callback.
2965   */
2966  void bpf_link_cleanup(struct bpf_link_primer *primer)
2967  {
2968  	primer->link->prog = NULL;
2969  	bpf_link_free_id(primer->id);
2970  	fput(primer->file);
2971  	put_unused_fd(primer->fd);
2972  }
2973  
2974  void bpf_link_inc(struct bpf_link *link)
2975  {
2976  	atomic64_inc(&link->refcnt);
2977  }
2978  
2979  static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
2980  {
2981  	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
2982  
2983  	/* free bpf_link and its containing memory */
2984  	link->ops->dealloc_deferred(link);
2985  }
2986  
2987  static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
2988  {
2989  	if (rcu_trace_implies_rcu_gp())
2990  		bpf_link_defer_dealloc_rcu_gp(rcu);
2991  	else
2992  		call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
2993  }
2994  
2995  /* bpf_link_free is guaranteed to be called from process context */
2996  static void bpf_link_free(struct bpf_link *link)
2997  {
2998  	const struct bpf_link_ops *ops = link->ops;
2999  	bool sleepable = false;
3000  
3001  	bpf_link_free_id(link->id);
3002  	if (link->prog) {
3003  		sleepable = link->prog->sleepable;
3004  		/* detach BPF program, clean up used resources */
3005  		ops->release(link);
3006  		bpf_prog_put(link->prog);
3007  	}
3008  	if (ops->dealloc_deferred) {
3009  		/* schedule BPF link deallocation; if underlying BPF program
3010  		 * is sleepable, we need to first wait for RCU tasks trace
3011  		 * sync, then go through "classic" RCU grace period
3012  		 */
3013  		if (sleepable)
3014  			call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
3015  		else
3016  			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
3017  	} else if (ops->dealloc)
3018  		ops->dealloc(link);
3019  }
3020  
3021  static void bpf_link_put_deferred(struct work_struct *work)
3022  {
3023  	struct bpf_link *link = container_of(work, struct bpf_link, work);
3024  
3025  	bpf_link_free(link);
3026  }
3027  
3028  /* bpf_link_put might be called from atomic context. It needs to be called
3029   * from sleepable context in order to acquire sleeping locks during the process.
3030   */
3031  void bpf_link_put(struct bpf_link *link)
3032  {
3033  	if (!atomic64_dec_and_test(&link->refcnt))
3034  		return;
3035  
3036  	INIT_WORK(&link->work, bpf_link_put_deferred);
3037  	schedule_work(&link->work);
3038  }
3039  EXPORT_SYMBOL(bpf_link_put);
3040  
3041  static void bpf_link_put_direct(struct bpf_link *link)
3042  {
3043  	if (!atomic64_dec_and_test(&link->refcnt))
3044  		return;
3045  	bpf_link_free(link);
3046  }
3047  
3048  static int bpf_link_release(struct inode *inode, struct file *filp)
3049  {
3050  	struct bpf_link *link = filp->private_data;
3051  
3052  	bpf_link_put_direct(link);
3053  	return 0;
3054  }
3055  
3056  #ifdef CONFIG_PROC_FS
3057  #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3058  #define BPF_MAP_TYPE(_id, _ops)
3059  #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
3060  static const char *bpf_link_type_strs[] = {
3061  	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
3062  #include <linux/bpf_types.h>
3063  };
3064  #undef BPF_PROG_TYPE
3065  #undef BPF_MAP_TYPE
3066  #undef BPF_LINK_TYPE
3067  
3068  static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
3069  {
3070  	const struct bpf_link *link = filp->private_data;
3071  	const struct bpf_prog *prog = link->prog;
3072  	enum bpf_link_type type = link->type;
3073  	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
3074  
3075  	if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
3076  		seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
3077  	} else {
3078  		WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
3079  		seq_printf(m, "link_type:\t<%u>\n", type);
3080  	}
3081  	seq_printf(m, "link_id:\t%u\n", link->id);
3082  
3083  	if (prog) {
3084  		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
3085  		seq_printf(m,
3086  			   "prog_tag:\t%s\n"
3087  			   "prog_id:\t%u\n",
3088  			   prog_tag,
3089  			   prog->aux->id);
3090  	}
3091  	if (link->ops->show_fdinfo)
3092  		link->ops->show_fdinfo(link, m);
3093  }
3094  #endif
3095  
3096  static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
3097  {
3098  	struct bpf_link *link = file->private_data;
3099  
3100  	return link->ops->poll(file, pts);
3101  }
3102  
3103  static const struct file_operations bpf_link_fops = {
3104  #ifdef CONFIG_PROC_FS
3105  	.show_fdinfo	= bpf_link_show_fdinfo,
3106  #endif
3107  	.release	= bpf_link_release,
3108  	.read		= bpf_dummy_read,
3109  	.write		= bpf_dummy_write,
3110  };
3111  
3112  static const struct file_operations bpf_link_fops_poll = {
3113  #ifdef CONFIG_PROC_FS
3114  	.show_fdinfo	= bpf_link_show_fdinfo,
3115  #endif
3116  	.release	= bpf_link_release,
3117  	.read		= bpf_dummy_read,
3118  	.write		= bpf_dummy_write,
3119  	.poll		= bpf_link_poll,
3120  };
3121  
3122  static int bpf_link_alloc_id(struct bpf_link *link)
3123  {
3124  	int id;
3125  
3126  	idr_preload(GFP_KERNEL);
3127  	spin_lock_bh(&link_idr_lock);
3128  	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
3129  	spin_unlock_bh(&link_idr_lock);
3130  	idr_preload_end();
3131  
3132  	return id;
3133  }
3134  
3135  /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3136   * reserving unused FD and allocating ID from link_idr. This is to be paired
3137   * with bpf_link_settle() to install FD and ID and expose bpf_link to
3138   * user-space, if bpf_link is successfully attached. If not, bpf_link and
3139   * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
3140   * the transient state is passed around in struct bpf_link_primer.
3141   * This is the preferred way to create and initialize bpf_link, especially when
3142   * there are complicated and expensive operations in between creating bpf_link
3143   * itself and attaching it to BPF hook. By using bpf_link_prime() and
3144   * bpf_link_settle() kernel code using bpf_link doesn't have to perform
3145   * expensive (and potentially failing) roll back operations in a rare case
3146   * that file, FD, or ID can't be allocated.
3147   */
3148  int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3149  {
3150  	struct file *file;
3151  	int fd, id;
3152  
3153  	fd = get_unused_fd_flags(O_CLOEXEC);
3154  	if (fd < 0)
3155  		return fd;
3156  
3157  
3158  	id = bpf_link_alloc_id(link);
3159  	if (id < 0) {
3160  		put_unused_fd(fd);
3161  		return id;
3162  	}
3163  
3164  	file = anon_inode_getfile("bpf_link",
3165  				  link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3166  				  link, O_CLOEXEC);
3167  	if (IS_ERR(file)) {
3168  		bpf_link_free_id(id);
3169  		put_unused_fd(fd);
3170  		return PTR_ERR(file);
3171  	}
3172  
3173  	primer->link = link;
3174  	primer->file = file;
3175  	primer->fd = fd;
3176  	primer->id = id;
3177  	return 0;
3178  }
3179  
3180  int bpf_link_settle(struct bpf_link_primer *primer)
3181  {
3182  	/* make bpf_link fetchable by ID */
3183  	spin_lock_bh(&link_idr_lock);
3184  	primer->link->id = primer->id;
3185  	spin_unlock_bh(&link_idr_lock);
3186  	/* make bpf_link fetchable by FD */
3187  	fd_install(primer->fd, primer->file);
3188  	/* pass through installed FD */
3189  	return primer->fd;
3190  }
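
/*
 * Editor's note -- illustrative sketch of the intended calling pattern for
 * the primer API above, mirroring the link-attach paths below (e.g.
 * bpf_tracing_prog_attach()); my_link_lops and the attach step are
 * placeholders for the link-type-specific pieces:
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_..., &my_link_lops, prog);
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = ...attach link to the BPF hook...;
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);	... frees link via dealloc
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);	... installs FD and ID
 */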
3191  
3192  int bpf_link_new_fd(struct bpf_link *link)
3193  {
3194  	return anon_inode_getfd("bpf-link",
3195  				link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3196  				link, O_CLOEXEC);
3197  }
3198  
3199  struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3200  {
3201  	CLASS(fd, f)(ufd);
3202  	struct bpf_link *link;
3203  
3204  	if (fd_empty(f))
3205  		return ERR_PTR(-EBADF);
3206  	if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
3207  		return ERR_PTR(-EINVAL);
3208  
3209  	link = fd_file(f)->private_data;
3210  	bpf_link_inc(link);
3211  	return link;
3212  }
3213  EXPORT_SYMBOL(bpf_link_get_from_fd);
3214  
3215  static void bpf_tracing_link_release(struct bpf_link *link)
3216  {
3217  	struct bpf_tracing_link *tr_link =
3218  		container_of(link, struct bpf_tracing_link, link.link);
3219  
3220  	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3221  						tr_link->trampoline));
3222  
3223  	bpf_trampoline_put(tr_link->trampoline);
3224  
3225  	/* tgt_prog is NULL if target is a kernel function */
3226  	if (tr_link->tgt_prog)
3227  		bpf_prog_put(tr_link->tgt_prog);
3228  }
3229  
3230  static void bpf_tracing_link_dealloc(struct bpf_link *link)
3231  {
3232  	struct bpf_tracing_link *tr_link =
3233  		container_of(link, struct bpf_tracing_link, link.link);
3234  
3235  	kfree(tr_link);
3236  }
3237  
3238  static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3239  					 struct seq_file *seq)
3240  {
3241  	struct bpf_tracing_link *tr_link =
3242  		container_of(link, struct bpf_tracing_link, link.link);
3243  	u32 target_btf_id, target_obj_id;
3244  
3245  	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3246  				  &target_obj_id, &target_btf_id);
3247  	seq_printf(seq,
3248  		   "attach_type:\t%d\n"
3249  		   "target_obj_id:\t%u\n"
3250  		   "target_btf_id:\t%u\n",
3251  		   tr_link->attach_type,
3252  		   target_obj_id,
3253  		   target_btf_id);
3254  }
3255  
3256  static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3257  					   struct bpf_link_info *info)
3258  {
3259  	struct bpf_tracing_link *tr_link =
3260  		container_of(link, struct bpf_tracing_link, link.link);
3261  
3262  	info->tracing.attach_type = tr_link->attach_type;
3263  	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3264  				  &info->tracing.target_obj_id,
3265  				  &info->tracing.target_btf_id);
3266  
3267  	return 0;
3268  }
3269  
3270  static const struct bpf_link_ops bpf_tracing_link_lops = {
3271  	.release = bpf_tracing_link_release,
3272  	.dealloc = bpf_tracing_link_dealloc,
3273  	.show_fdinfo = bpf_tracing_link_show_fdinfo,
3274  	.fill_link_info = bpf_tracing_link_fill_link_info,
3275  };
3276  
3277  static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3278  				   int tgt_prog_fd,
3279  				   u32 btf_id,
3280  				   u64 bpf_cookie)
3281  {
3282  	struct bpf_link_primer link_primer;
3283  	struct bpf_prog *tgt_prog = NULL;
3284  	struct bpf_trampoline *tr = NULL;
3285  	struct bpf_tracing_link *link;
3286  	u64 key = 0;
3287  	int err;
3288  
3289  	switch (prog->type) {
3290  	case BPF_PROG_TYPE_TRACING:
3291  		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3292  		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
3293  		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
3294  			err = -EINVAL;
3295  			goto out_put_prog;
3296  		}
3297  		break;
3298  	case BPF_PROG_TYPE_EXT:
3299  		if (prog->expected_attach_type != 0) {
3300  			err = -EINVAL;
3301  			goto out_put_prog;
3302  		}
3303  		break;
3304  	case BPF_PROG_TYPE_LSM:
3305  		if (prog->expected_attach_type != BPF_LSM_MAC) {
3306  			err = -EINVAL;
3307  			goto out_put_prog;
3308  		}
3309  		break;
3310  	default:
3311  		err = -EINVAL;
3312  		goto out_put_prog;
3313  	}
3314  
3315  	if (!!tgt_prog_fd != !!btf_id) {
3316  		err = -EINVAL;
3317  		goto out_put_prog;
3318  	}
3319  
3320  	if (tgt_prog_fd) {
3321  		/*
3322  		 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this
3323  		 * part would be changed to implement the same for
3324  		 * BPF_PROG_TYPE_TRACING, do not forget to update the way how
3325  		 * attach_tracing_prog flag is set.
3326  		 */
3327  		if (prog->type != BPF_PROG_TYPE_EXT) {
3328  			err = -EINVAL;
3329  			goto out_put_prog;
3330  		}
3331  
3332  		tgt_prog = bpf_prog_get(tgt_prog_fd);
3333  		if (IS_ERR(tgt_prog)) {
3334  			err = PTR_ERR(tgt_prog);
3335  			tgt_prog = NULL;
3336  			goto out_put_prog;
3337  		}
3338  
3339  		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3340  	}
3341  
3342  	link = kzalloc(sizeof(*link), GFP_USER);
3343  	if (!link) {
3344  		err = -ENOMEM;
3345  		goto out_put_prog;
3346  	}
3347  	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3348  		      &bpf_tracing_link_lops, prog);
3349  	link->attach_type = prog->expected_attach_type;
3350  	link->link.cookie = bpf_cookie;
3351  
3352  	mutex_lock(&prog->aux->dst_mutex);
3353  
3354  	/* There are a few possible cases here:
3355  	 *
3356  	 * - if prog->aux->dst_trampoline is set, the program was just loaded
3357  	 *   and not yet attached to anything, so we can use the values stored
3358  	 *   in prog->aux
3359  	 *
3360  	 * - if prog->aux->dst_trampoline is NULL, the program has already been
3361  	 *   attached to a target and its initial target was cleared (below)
3362  	 *
3363  	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3364  	 *   target_btf_id using the link_create API.
3365  	 *
3366  	 * - if tgt_prog == NULL, this function was called using the old
3367  	 *   raw_tracepoint_open API and we need a target from prog->aux
3368  	 *
3369  	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3370  	 *   was detached and is going for re-attachment.
3371  	 *
3372  	 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
3373  	 *   are NULL, then program was already attached and user did not provide
3374  	 *   tgt_prog_fd so we have no way to find out or create trampoline
3375  	 */
3376  	if (!prog->aux->dst_trampoline && !tgt_prog) {
3377  		/*
3378  		 * Allow re-attach for TRACING and LSM programs. If it's
3379  		 * currently linked, bpf_trampoline_link_prog will fail.
3380  		 * EXT programs need to specify tgt_prog_fd, so they
3381  		 * re-attach in separate code path.
3382  		 */
3383  		if (prog->type != BPF_PROG_TYPE_TRACING &&
3384  		    prog->type != BPF_PROG_TYPE_LSM) {
3385  			err = -EINVAL;
3386  			goto out_unlock;
3387  		}
3388  		/* We can allow re-attach only if we have valid attach_btf. */
3389  		if (!prog->aux->attach_btf) {
3390  			err = -EINVAL;
3391  			goto out_unlock;
3392  		}
3393  		btf_id = prog->aux->attach_btf_id;
3394  		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3395  	}
3396  
3397  	if (!prog->aux->dst_trampoline ||
3398  	    (key && key != prog->aux->dst_trampoline->key)) {
3399  		/* If there is no saved target, or the specified target is
3400  		 * different from the destination specified at load time, we
3401  		 * need a new trampoline and a check for compatibility
3402  		 */
3403  		struct bpf_attach_target_info tgt_info = {};
3404  
3405  		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3406  					      &tgt_info);
3407  		if (err)
3408  			goto out_unlock;
3409  
3410  		if (tgt_info.tgt_mod) {
3411  			module_put(prog->aux->mod);
3412  			prog->aux->mod = tgt_info.tgt_mod;
3413  		}
3414  
3415  		tr = bpf_trampoline_get(key, &tgt_info);
3416  		if (!tr) {
3417  			err = -ENOMEM;
3418  			goto out_unlock;
3419  		}
3420  	} else {
3421  		/* The caller didn't specify a target, or the target was the
3422  		 * same as the destination supplied during program load. This
3423  		 * means we can reuse the trampoline and reference from program
3424  		 * load time, and there is no need to allocate a new one. This
3425  		 * can only happen once for any program, as the saved values in
3426  		 * prog->aux are cleared below.
3427  		 */
3428  		tr = prog->aux->dst_trampoline;
3429  		tgt_prog = prog->aux->dst_prog;
3430  	}
3431  
3432  	err = bpf_link_prime(&link->link.link, &link_primer);
3433  	if (err)
3434  		goto out_unlock;
3435  
3436  	err = bpf_trampoline_link_prog(&link->link, tr);
3437  	if (err) {
3438  		bpf_link_cleanup(&link_primer);
3439  		link = NULL;
3440  		goto out_unlock;
3441  	}
3442  
3443  	link->tgt_prog = tgt_prog;
3444  	link->trampoline = tr;
3445  
3446  	/* Always clear the trampoline and target prog from prog->aux to make
3447  	 * sure the original attach destination is not kept alive after a
3448  	 * program is (re-)attached to another target.
3449  	 */
3450  	if (prog->aux->dst_prog &&
3451  	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3452  		/* got extra prog ref from syscall, or attaching to different prog */
3453  		bpf_prog_put(prog->aux->dst_prog);
3454  	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3455  		/* we allocated a new trampoline, so free the old one */
3456  		bpf_trampoline_put(prog->aux->dst_trampoline);
3457  
3458  	prog->aux->dst_prog = NULL;
3459  	prog->aux->dst_trampoline = NULL;
3460  	mutex_unlock(&prog->aux->dst_mutex);
3461  
3462  	return bpf_link_settle(&link_primer);
3463  out_unlock:
3464  	if (tr && tr != prog->aux->dst_trampoline)
3465  		bpf_trampoline_put(tr);
3466  	mutex_unlock(&prog->aux->dst_mutex);
3467  	kfree(link);
3468  out_put_prog:
3469  	if (tgt_prog_fd && tgt_prog)
3470  		bpf_prog_put(tgt_prog);
3471  	return err;
3472  }
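
/*
 * Illustrative only (not part of this file): a minimal user-space sketch of
 * reaching bpf_tracing_prog_attach() above through BPF_LINK_CREATE. It
 * assumes prog_fd refers to a BPF_PROG_TYPE_TRACING program loaded with
 * expected_attach_type BPF_TRACE_FENTRY; target_fd/target_btf_id are left at
 * zero so the target saved in prog->aux at load time is reused.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int fentry_link_create(int prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.link_create.prog_fd = prog_fd;
 *		attr.link_create.attach_type = BPF_TRACE_FENTRY;
 *		// returns a BPF link FD on success, -1 with errno on failure
 *		return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *	}
 */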
3473  
3474  static void bpf_raw_tp_link_release(struct bpf_link *link)
3475  {
3476  	struct bpf_raw_tp_link *raw_tp =
3477  		container_of(link, struct bpf_raw_tp_link, link);
3478  
3479  	bpf_probe_unregister(raw_tp->btp, raw_tp);
3480  	bpf_put_raw_tracepoint(raw_tp->btp);
3481  }
3482  
3483  static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3484  {
3485  	struct bpf_raw_tp_link *raw_tp =
3486  		container_of(link, struct bpf_raw_tp_link, link);
3487  
3488  	kfree(raw_tp);
3489  }
3490  
3491  static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3492  					struct seq_file *seq)
3493  {
3494  	struct bpf_raw_tp_link *raw_tp_link =
3495  		container_of(link, struct bpf_raw_tp_link, link);
3496  
3497  	seq_printf(seq,
3498  		   "tp_name:\t%s\n",
3499  		   raw_tp_link->btp->tp->name);
3500  }
3501  
3502  static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3503  			    u32 len)
3504  {
3505  	if (ulen >= len + 1) {
3506  		if (copy_to_user(ubuf, buf, len + 1))
3507  			return -EFAULT;
3508  	} else {
3509  		char zero = '\0';
3510  
3511  		if (copy_to_user(ubuf, buf, ulen - 1))
3512  			return -EFAULT;
3513  		if (put_user(zero, ubuf + ulen - 1))
3514  			return -EFAULT;
3515  		return -ENOSPC;
3516  	}
3517  
3518  	return 0;
3519  }
3520  
3521  static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3522  					  struct bpf_link_info *info)
3523  {
3524  	struct bpf_raw_tp_link *raw_tp_link =
3525  		container_of(link, struct bpf_raw_tp_link, link);
3526  	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3527  	const char *tp_name = raw_tp_link->btp->tp->name;
3528  	u32 ulen = info->raw_tracepoint.tp_name_len;
3529  	size_t tp_len = strlen(tp_name);
3530  
3531  	if (!ulen ^ !ubuf)
3532  		return -EINVAL;
3533  
3534  	info->raw_tracepoint.tp_name_len = tp_len + 1;
3535  
3536  	if (!ubuf)
3537  		return 0;
3538  
3539  	return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3540  }
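
/*
 * Illustrative only (not part of this file): a user-space sketch of the
 * two-pass pattern the fill_link_info handler above supports. A first
 * BPF_OBJ_GET_INFO_BY_FD call with tp_name/tp_name_len both zero reports the
 * required buffer size, the second call copies the name. link_fd is assumed
 * to be an FD for a raw tracepoint link.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static char *raw_tp_link_name(int link_fd)
 *	{
 *		struct bpf_link_info info;
 *		union bpf_attr attr;
 *		char *buf;
 *
 *		memset(&info, 0, sizeof(info));
 *		memset(&attr, 0, sizeof(attr));
 *		attr.info.bpf_fd = link_fd;
 *		attr.info.info_len = sizeof(info);
 *		attr.info.info = (__u64)(unsigned long)&info;
 *		// first pass: kernel reports tp_name_len = strlen(name) + 1
 *		if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *			return NULL;
 *		buf = malloc(info.raw_tracepoint.tp_name_len);
 *		if (!buf)
 *			return NULL;
 *		info.raw_tracepoint.tp_name = (__u64)(unsigned long)buf;
 *		// second pass: copy the NUL-terminated name into buf
 *		if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr))) {
 *			free(buf);
 *			return NULL;
 *		}
 *		return buf;
 *	}
 */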
3541  
3542  static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3543  	.release = bpf_raw_tp_link_release,
3544  	.dealloc_deferred = bpf_raw_tp_link_dealloc,
3545  	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3546  	.fill_link_info = bpf_raw_tp_link_fill_link_info,
3547  };
3548  
3549  #ifdef CONFIG_PERF_EVENTS
3550  struct bpf_perf_link {
3551  	struct bpf_link link;
3552  	struct file *perf_file;
3553  };
3554  
3555  static void bpf_perf_link_release(struct bpf_link *link)
3556  {
3557  	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3558  	struct perf_event *event = perf_link->perf_file->private_data;
3559  
3560  	perf_event_free_bpf_prog(event);
3561  	fput(perf_link->perf_file);
3562  }
3563  
3564  static void bpf_perf_link_dealloc(struct bpf_link *link)
3565  {
3566  	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3567  
3568  	kfree(perf_link);
3569  }
3570  
3571  static int bpf_perf_link_fill_common(const struct perf_event *event,
3572  				     char __user *uname, u32 *ulenp,
3573  				     u64 *probe_offset, u64 *probe_addr,
3574  				     u32 *fd_type, unsigned long *missed)
3575  {
3576  	const char *buf;
3577  	u32 prog_id, ulen;
3578  	size_t len;
3579  	int err;
3580  
3581  	ulen = *ulenp;
3582  	if (!ulen ^ !uname)
3583  		return -EINVAL;
3584  
3585  	err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3586  				      probe_offset, probe_addr, missed);
3587  	if (err)
3588  		return err;
3589  
3590  	if (buf) {
3591  		len = strlen(buf);
3592  		*ulenp = len + 1;
3593  	} else {
3594  		*ulenp = 1;
3595  	}
3596  	if (!uname)
3597  		return 0;
3598  
3599  	if (buf) {
3600  		err = bpf_copy_to_user(uname, buf, ulen, len);
3601  		if (err)
3602  			return err;
3603  	} else {
3604  		char zero = '\0';
3605  
3606  		if (put_user(zero, uname))
3607  			return -EFAULT;
3608  	}
3609  	return 0;
3610  }
3611  
3612  #ifdef CONFIG_KPROBE_EVENTS
3613  static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3614  				     struct bpf_link_info *info)
3615  {
3616  	unsigned long missed;
3617  	char __user *uname;
3618  	u64 addr, offset;
3619  	u32 ulen, type;
3620  	int err;
3621  
3622  	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3623  	ulen = info->perf_event.kprobe.name_len;
3624  	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
3625  					&type, &missed);
3626  	if (err)
3627  		return err;
3628  	if (type == BPF_FD_TYPE_KRETPROBE)
3629  		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3630  	else
3631  		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3632  	info->perf_event.kprobe.name_len = ulen;
3633  	info->perf_event.kprobe.offset = offset;
3634  	info->perf_event.kprobe.missed = missed;
3635  	if (!kallsyms_show_value(current_cred()))
3636  		addr = 0;
3637  	info->perf_event.kprobe.addr = addr;
3638  	info->perf_event.kprobe.cookie = event->bpf_cookie;
3639  	return 0;
3640  }
3641  #endif
3642  
3643  #ifdef CONFIG_UPROBE_EVENTS
3644  static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3645  				     struct bpf_link_info *info)
3646  {
3647  	char __user *uname;
3648  	u64 addr, offset;
3649  	u32 ulen, type;
3650  	int err;
3651  
3652  	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3653  	ulen = info->perf_event.uprobe.name_len;
3654  	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
3655  					&type, NULL);
3656  	if (err)
3657  		return err;
3658  
3659  	if (type == BPF_FD_TYPE_URETPROBE)
3660  		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3661  	else
3662  		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3663  	info->perf_event.uprobe.name_len = ulen;
3664  	info->perf_event.uprobe.offset = offset;
3665  	info->perf_event.uprobe.cookie = event->bpf_cookie;
3666  	return 0;
3667  }
3668  #endif
3669  
3670  static int bpf_perf_link_fill_probe(const struct perf_event *event,
3671  				    struct bpf_link_info *info)
3672  {
3673  #ifdef CONFIG_KPROBE_EVENTS
3674  	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3675  		return bpf_perf_link_fill_kprobe(event, info);
3676  #endif
3677  #ifdef CONFIG_UPROBE_EVENTS
3678  	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3679  		return bpf_perf_link_fill_uprobe(event, info);
3680  #endif
3681  	return -EOPNOTSUPP;
3682  }
3683  
3684  static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3685  					 struct bpf_link_info *info)
3686  {
3687  	char __user *uname;
3688  	u32 ulen;
3689  	int err;
3690  
3691  	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3692  	ulen = info->perf_event.tracepoint.name_len;
3693  	err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
3694  	if (err)
3695  		return err;
3696  
3697  	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3698  	info->perf_event.tracepoint.name_len = ulen;
3699  	info->perf_event.tracepoint.cookie = event->bpf_cookie;
3700  	return 0;
3701  }
3702  
3703  static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3704  					 struct bpf_link_info *info)
3705  {
3706  	info->perf_event.event.type = event->attr.type;
3707  	info->perf_event.event.config = event->attr.config;
3708  	info->perf_event.event.cookie = event->bpf_cookie;
3709  	info->perf_event.type = BPF_PERF_EVENT_EVENT;
3710  	return 0;
3711  }
3712  
3713  static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3714  					struct bpf_link_info *info)
3715  {
3716  	struct bpf_perf_link *perf_link;
3717  	const struct perf_event *event;
3718  
3719  	perf_link = container_of(link, struct bpf_perf_link, link);
3720  	event = perf_get_event(perf_link->perf_file);
3721  	if (IS_ERR(event))
3722  		return PTR_ERR(event);
3723  
3724  	switch (event->prog->type) {
3725  	case BPF_PROG_TYPE_PERF_EVENT:
3726  		return bpf_perf_link_fill_perf_event(event, info);
3727  	case BPF_PROG_TYPE_TRACEPOINT:
3728  		return bpf_perf_link_fill_tracepoint(event, info);
3729  	case BPF_PROG_TYPE_KPROBE:
3730  		return bpf_perf_link_fill_probe(event, info);
3731  	default:
3732  		return -EOPNOTSUPP;
3733  	}
3734  }
3735  
3736  static const struct bpf_link_ops bpf_perf_link_lops = {
3737  	.release = bpf_perf_link_release,
3738  	.dealloc = bpf_perf_link_dealloc,
3739  	.fill_link_info = bpf_perf_link_fill_link_info,
3740  };
3741  
3742  static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3743  {
3744  	struct bpf_link_primer link_primer;
3745  	struct bpf_perf_link *link;
3746  	struct perf_event *event;
3747  	struct file *perf_file;
3748  	int err;
3749  
3750  	if (attr->link_create.flags)
3751  		return -EINVAL;
3752  
3753  	perf_file = perf_event_get(attr->link_create.target_fd);
3754  	if (IS_ERR(perf_file))
3755  		return PTR_ERR(perf_file);
3756  
3757  	link = kzalloc(sizeof(*link), GFP_USER);
3758  	if (!link) {
3759  		err = -ENOMEM;
3760  		goto out_put_file;
3761  	}
3762  	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3763  	link->perf_file = perf_file;
3764  
3765  	err = bpf_link_prime(&link->link, &link_primer);
3766  	if (err) {
3767  		kfree(link);
3768  		goto out_put_file;
3769  	}
3770  
3771  	event = perf_file->private_data;
3772  	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3773  	if (err) {
3774  		bpf_link_cleanup(&link_primer);
3775  		goto out_put_file;
3776  	}
3777  	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3778  	bpf_prog_inc(prog);
3779  
3780  	return bpf_link_settle(&link_primer);
3781  
3782  out_put_file:
3783  	fput(perf_file);
3784  	return err;
3785  }
3786  #else
3787  static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3788  {
3789  	return -EOPNOTSUPP;
3790  }
3791  #endif /* CONFIG_PERF_EVENTS */
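
/*
 * Illustrative only (not part of this file): a minimal user-space sketch of
 * BPF_LINK_CREATE with a perf event target, handled by bpf_perf_link_attach()
 * above. perf_fd is assumed to come from perf_event_open() and prog_fd from a
 * loaded BPF_PROG_TYPE_PERF_EVENT, tracepoint or kprobe program.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int perf_link_create(int prog_fd, int perf_fd, __u64 cookie)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.link_create.prog_fd = prog_fd;
 *		attr.link_create.target_fd = perf_fd;
 *		attr.link_create.attach_type = BPF_PERF_EVENT;
 *		attr.link_create.perf_event.bpf_cookie = cookie;
 *		return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *	}
 */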
3792  
3793  static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3794  				  const char __user *user_tp_name, u64 cookie)
3795  {
3796  	struct bpf_link_primer link_primer;
3797  	struct bpf_raw_tp_link *link;
3798  	struct bpf_raw_event_map *btp;
3799  	const char *tp_name;
3800  	char buf[128];
3801  	int err;
3802  
3803  	switch (prog->type) {
3804  	case BPF_PROG_TYPE_TRACING:
3805  	case BPF_PROG_TYPE_EXT:
3806  	case BPF_PROG_TYPE_LSM:
3807  		if (user_tp_name)
3808  			/* The attach point for this category of programs
3809  			 * should be specified via btf_id during program load.
3810  			 */
3811  			return -EINVAL;
3812  		if (prog->type == BPF_PROG_TYPE_TRACING &&
3813  		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3814  			tp_name = prog->aux->attach_func_name;
3815  			break;
3816  		}
3817  		return bpf_tracing_prog_attach(prog, 0, 0, 0);
3818  	case BPF_PROG_TYPE_RAW_TRACEPOINT:
3819  	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3820  		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3821  			return -EFAULT;
3822  		buf[sizeof(buf) - 1] = 0;
3823  		tp_name = buf;
3824  		break;
3825  	default:
3826  		return -EINVAL;
3827  	}
3828  
3829  	btp = bpf_get_raw_tracepoint(tp_name);
3830  	if (!btp)
3831  		return -ENOENT;
3832  
3833  	link = kzalloc(sizeof(*link), GFP_USER);
3834  	if (!link) {
3835  		err = -ENOMEM;
3836  		goto out_put_btp;
3837  	}
3838  	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3839  		      &bpf_raw_tp_link_lops, prog);
3840  	link->btp = btp;
3841  	link->cookie = cookie;
3842  
3843  	err = bpf_link_prime(&link->link, &link_primer);
3844  	if (err) {
3845  		kfree(link);
3846  		goto out_put_btp;
3847  	}
3848  
3849  	err = bpf_probe_register(link->btp, link);
3850  	if (err) {
3851  		bpf_link_cleanup(&link_primer);
3852  		goto out_put_btp;
3853  	}
3854  
3855  	return bpf_link_settle(&link_primer);
3856  
3857  out_put_btp:
3858  	bpf_put_raw_tracepoint(btp);
3859  	return err;
3860  }
3861  
3862  #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
3863  
3864  static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3865  {
3866  	struct bpf_prog *prog;
3867  	void __user *tp_name;
3868  	__u64 cookie;
3869  	int fd;
3870  
3871  	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3872  		return -EINVAL;
3873  
3874  	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3875  	if (IS_ERR(prog))
3876  		return PTR_ERR(prog);
3877  
3878  	tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
3879  	cookie = attr->raw_tracepoint.cookie;
3880  	fd = bpf_raw_tp_link_attach(prog, tp_name, cookie);
3881  	if (fd < 0)
3882  		bpf_prog_put(prog);
3883  	return fd;
3884  }
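
/*
 * Illustrative only (not part of this file): a minimal user-space sketch of
 * the BPF_RAW_TRACEPOINT_OPEN command handled above. The tracepoint name
 * "sched_switch" is an assumption for the example; prog_fd must refer to a
 * loaded raw tracepoint program.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int raw_tp_open(int prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *		attr.raw_tracepoint.prog_fd = prog_fd;
 *		// returns a link FD backed by bpf_raw_tp_link_lops, or -1 on error
 *		return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 *	}
 */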
3885  
3886  static enum bpf_prog_type
3887  attach_type_to_prog_type(enum bpf_attach_type attach_type)
3888  {
3889  	switch (attach_type) {
3890  	case BPF_CGROUP_INET_INGRESS:
3891  	case BPF_CGROUP_INET_EGRESS:
3892  		return BPF_PROG_TYPE_CGROUP_SKB;
3893  	case BPF_CGROUP_INET_SOCK_CREATE:
3894  	case BPF_CGROUP_INET_SOCK_RELEASE:
3895  	case BPF_CGROUP_INET4_POST_BIND:
3896  	case BPF_CGROUP_INET6_POST_BIND:
3897  		return BPF_PROG_TYPE_CGROUP_SOCK;
3898  	case BPF_CGROUP_INET4_BIND:
3899  	case BPF_CGROUP_INET6_BIND:
3900  	case BPF_CGROUP_INET4_CONNECT:
3901  	case BPF_CGROUP_INET6_CONNECT:
3902  	case BPF_CGROUP_UNIX_CONNECT:
3903  	case BPF_CGROUP_INET4_GETPEERNAME:
3904  	case BPF_CGROUP_INET6_GETPEERNAME:
3905  	case BPF_CGROUP_UNIX_GETPEERNAME:
3906  	case BPF_CGROUP_INET4_GETSOCKNAME:
3907  	case BPF_CGROUP_INET6_GETSOCKNAME:
3908  	case BPF_CGROUP_UNIX_GETSOCKNAME:
3909  	case BPF_CGROUP_UDP4_SENDMSG:
3910  	case BPF_CGROUP_UDP6_SENDMSG:
3911  	case BPF_CGROUP_UNIX_SENDMSG:
3912  	case BPF_CGROUP_UDP4_RECVMSG:
3913  	case BPF_CGROUP_UDP6_RECVMSG:
3914  	case BPF_CGROUP_UNIX_RECVMSG:
3915  		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3916  	case BPF_CGROUP_SOCK_OPS:
3917  		return BPF_PROG_TYPE_SOCK_OPS;
3918  	case BPF_CGROUP_DEVICE:
3919  		return BPF_PROG_TYPE_CGROUP_DEVICE;
3920  	case BPF_SK_MSG_VERDICT:
3921  		return BPF_PROG_TYPE_SK_MSG;
3922  	case BPF_SK_SKB_STREAM_PARSER:
3923  	case BPF_SK_SKB_STREAM_VERDICT:
3924  	case BPF_SK_SKB_VERDICT:
3925  		return BPF_PROG_TYPE_SK_SKB;
3926  	case BPF_LIRC_MODE2:
3927  		return BPF_PROG_TYPE_LIRC_MODE2;
3928  	case BPF_FLOW_DISSECTOR:
3929  		return BPF_PROG_TYPE_FLOW_DISSECTOR;
3930  	case BPF_CGROUP_SYSCTL:
3931  		return BPF_PROG_TYPE_CGROUP_SYSCTL;
3932  	case BPF_CGROUP_GETSOCKOPT:
3933  	case BPF_CGROUP_SETSOCKOPT:
3934  		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3935  	case BPF_TRACE_ITER:
3936  	case BPF_TRACE_RAW_TP:
3937  	case BPF_TRACE_FENTRY:
3938  	case BPF_TRACE_FEXIT:
3939  	case BPF_MODIFY_RETURN:
3940  		return BPF_PROG_TYPE_TRACING;
3941  	case BPF_LSM_MAC:
3942  		return BPF_PROG_TYPE_LSM;
3943  	case BPF_SK_LOOKUP:
3944  		return BPF_PROG_TYPE_SK_LOOKUP;
3945  	case BPF_XDP:
3946  		return BPF_PROG_TYPE_XDP;
3947  	case BPF_LSM_CGROUP:
3948  		return BPF_PROG_TYPE_LSM;
3949  	case BPF_TCX_INGRESS:
3950  	case BPF_TCX_EGRESS:
3951  	case BPF_NETKIT_PRIMARY:
3952  	case BPF_NETKIT_PEER:
3953  		return BPF_PROG_TYPE_SCHED_CLS;
3954  	default:
3955  		return BPF_PROG_TYPE_UNSPEC;
3956  	}
3957  }
3958  
3959  static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3960  					     enum bpf_attach_type attach_type)
3961  {
3962  	enum bpf_prog_type ptype;
3963  
3964  	switch (prog->type) {
3965  	case BPF_PROG_TYPE_CGROUP_SOCK:
3966  	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3967  	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3968  	case BPF_PROG_TYPE_SK_LOOKUP:
3969  		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3970  	case BPF_PROG_TYPE_CGROUP_SKB:
3971  		if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
3972  			/* cg-skb progs can be loaded by unpriv user.
3973  			 * check permissions at attach time.
3974  			 */
3975  			return -EPERM;
3976  
3977  		ptype = attach_type_to_prog_type(attach_type);
3978  		if (prog->type != ptype)
3979  			return -EINVAL;
3980  
3981  		return prog->enforce_expected_attach_type &&
3982  			prog->expected_attach_type != attach_type ?
3983  			-EINVAL : 0;
3984  	case BPF_PROG_TYPE_EXT:
3985  		return 0;
3986  	case BPF_PROG_TYPE_NETFILTER:
3987  		if (attach_type != BPF_NETFILTER)
3988  			return -EINVAL;
3989  		return 0;
3990  	case BPF_PROG_TYPE_PERF_EVENT:
3991  	case BPF_PROG_TYPE_TRACEPOINT:
3992  		if (attach_type != BPF_PERF_EVENT)
3993  			return -EINVAL;
3994  		return 0;
3995  	case BPF_PROG_TYPE_KPROBE:
3996  		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
3997  		    attach_type != BPF_TRACE_KPROBE_MULTI)
3998  			return -EINVAL;
3999  		if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION &&
4000  		    attach_type != BPF_TRACE_KPROBE_SESSION)
4001  			return -EINVAL;
4002  		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
4003  		    attach_type != BPF_TRACE_UPROBE_MULTI)
4004  			return -EINVAL;
4005  		if (attach_type != BPF_PERF_EVENT &&
4006  		    attach_type != BPF_TRACE_KPROBE_MULTI &&
4007  		    attach_type != BPF_TRACE_KPROBE_SESSION &&
4008  		    attach_type != BPF_TRACE_UPROBE_MULTI)
4009  			return -EINVAL;
4010  		return 0;
4011  	case BPF_PROG_TYPE_SCHED_CLS:
4012  		if (attach_type != BPF_TCX_INGRESS &&
4013  		    attach_type != BPF_TCX_EGRESS &&
4014  		    attach_type != BPF_NETKIT_PRIMARY &&
4015  		    attach_type != BPF_NETKIT_PEER)
4016  			return -EINVAL;
4017  		return 0;
4018  	default:
4019  		ptype = attach_type_to_prog_type(attach_type);
4020  		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
4021  			return -EINVAL;
4022  		return 0;
4023  	}
4024  }
4025  
4026  #define BPF_PROG_ATTACH_LAST_FIELD expected_revision
4027  
4028  #define BPF_F_ATTACH_MASK_BASE	\
4029  	(BPF_F_ALLOW_OVERRIDE |	\
4030  	 BPF_F_ALLOW_MULTI |	\
4031  	 BPF_F_REPLACE)
4032  
4033  #define BPF_F_ATTACH_MASK_MPROG	\
4034  	(BPF_F_REPLACE |	\
4035  	 BPF_F_BEFORE |		\
4036  	 BPF_F_AFTER |		\
4037  	 BPF_F_ID |		\
4038  	 BPF_F_LINK)
4039  
4040  static int bpf_prog_attach(const union bpf_attr *attr)
4041  {
4042  	enum bpf_prog_type ptype;
4043  	struct bpf_prog *prog;
4044  	int ret;
4045  
4046  	if (CHECK_ATTR(BPF_PROG_ATTACH))
4047  		return -EINVAL;
4048  
4049  	ptype = attach_type_to_prog_type(attr->attach_type);
4050  	if (ptype == BPF_PROG_TYPE_UNSPEC)
4051  		return -EINVAL;
4052  	if (bpf_mprog_supported(ptype)) {
4053  		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4054  			return -EINVAL;
4055  	} else {
4056  		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
4057  			return -EINVAL;
4058  		if (attr->relative_fd ||
4059  		    attr->expected_revision)
4060  			return -EINVAL;
4061  	}
4062  
4063  	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4064  	if (IS_ERR(prog))
4065  		return PTR_ERR(prog);
4066  
4067  	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
4068  		bpf_prog_put(prog);
4069  		return -EINVAL;
4070  	}
4071  
4072  	switch (ptype) {
4073  	case BPF_PROG_TYPE_SK_SKB:
4074  	case BPF_PROG_TYPE_SK_MSG:
4075  		ret = sock_map_get_from_fd(attr, prog);
4076  		break;
4077  	case BPF_PROG_TYPE_LIRC_MODE2:
4078  		ret = lirc_prog_attach(attr, prog);
4079  		break;
4080  	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4081  		ret = netns_bpf_prog_attach(attr, prog);
4082  		break;
4083  	case BPF_PROG_TYPE_CGROUP_DEVICE:
4084  	case BPF_PROG_TYPE_CGROUP_SKB:
4085  	case BPF_PROG_TYPE_CGROUP_SOCK:
4086  	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4087  	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4088  	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4089  	case BPF_PROG_TYPE_SOCK_OPS:
4090  	case BPF_PROG_TYPE_LSM:
4091  		if (ptype == BPF_PROG_TYPE_LSM &&
4092  		    prog->expected_attach_type != BPF_LSM_CGROUP)
4093  			ret = -EINVAL;
4094  		else
4095  			ret = cgroup_bpf_prog_attach(attr, ptype, prog);
4096  		break;
4097  	case BPF_PROG_TYPE_SCHED_CLS:
4098  		if (attr->attach_type == BPF_TCX_INGRESS ||
4099  		    attr->attach_type == BPF_TCX_EGRESS)
4100  			ret = tcx_prog_attach(attr, prog);
4101  		else
4102  			ret = netkit_prog_attach(attr, prog);
4103  		break;
4104  	default:
4105  		ret = -EINVAL;
4106  	}
4107  
4108  	if (ret)
4109  		bpf_prog_put(prog);
4110  	return ret;
4111  }
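
/*
 * Illustrative only (not part of this file): a minimal user-space sketch of
 * the legacy BPF_PROG_ATTACH command handled above, attaching a
 * BPF_PROG_TYPE_CGROUP_SKB program to a cgroup's ingress hook. cgroup_fd is
 * assumed to be an open cgroup directory FD.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int attach_cgroup_skb(int cgroup_fd, int prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.target_fd = cgroup_fd;
 *		attr.attach_bpf_fd = prog_fd;
 *		attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *		attr.attach_flags = BPF_F_ALLOW_MULTI;
 *		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *	}
 */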
4112  
4113  #define BPF_PROG_DETACH_LAST_FIELD expected_revision
4114  
4115  static int bpf_prog_detach(const union bpf_attr *attr)
4116  {
4117  	struct bpf_prog *prog = NULL;
4118  	enum bpf_prog_type ptype;
4119  	int ret;
4120  
4121  	if (CHECK_ATTR(BPF_PROG_DETACH))
4122  		return -EINVAL;
4123  
4124  	ptype = attach_type_to_prog_type(attr->attach_type);
4125  	if (bpf_mprog_supported(ptype)) {
4126  		if (ptype == BPF_PROG_TYPE_UNSPEC)
4127  			return -EINVAL;
4128  		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4129  			return -EINVAL;
4130  		if (attr->attach_bpf_fd) {
4131  			prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4132  			if (IS_ERR(prog))
4133  				return PTR_ERR(prog);
4134  		}
4135  	} else if (attr->attach_flags ||
4136  		   attr->relative_fd ||
4137  		   attr->expected_revision) {
4138  		return -EINVAL;
4139  	}
4140  
4141  	switch (ptype) {
4142  	case BPF_PROG_TYPE_SK_MSG:
4143  	case BPF_PROG_TYPE_SK_SKB:
4144  		ret = sock_map_prog_detach(attr, ptype);
4145  		break;
4146  	case BPF_PROG_TYPE_LIRC_MODE2:
4147  		ret = lirc_prog_detach(attr);
4148  		break;
4149  	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4150  		ret = netns_bpf_prog_detach(attr, ptype);
4151  		break;
4152  	case BPF_PROG_TYPE_CGROUP_DEVICE:
4153  	case BPF_PROG_TYPE_CGROUP_SKB:
4154  	case BPF_PROG_TYPE_CGROUP_SOCK:
4155  	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4156  	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4157  	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4158  	case BPF_PROG_TYPE_SOCK_OPS:
4159  	case BPF_PROG_TYPE_LSM:
4160  		ret = cgroup_bpf_prog_detach(attr, ptype);
4161  		break;
4162  	case BPF_PROG_TYPE_SCHED_CLS:
4163  		if (attr->attach_type == BPF_TCX_INGRESS ||
4164  		    attr->attach_type == BPF_TCX_EGRESS)
4165  			ret = tcx_prog_detach(attr, prog);
4166  		else
4167  			ret = netkit_prog_detach(attr, prog);
4168  		break;
4169  	default:
4170  		ret = -EINVAL;
4171  	}
4172  
4173  	if (prog)
4174  		bpf_prog_put(prog);
4175  	return ret;
4176  }
4177  
4178  #define BPF_PROG_QUERY_LAST_FIELD query.revision
4179  
4180  static int bpf_prog_query(const union bpf_attr *attr,
4181  			  union bpf_attr __user *uattr)
4182  {
4183  	if (!bpf_net_capable())
4184  		return -EPERM;
4185  	if (CHECK_ATTR(BPF_PROG_QUERY))
4186  		return -EINVAL;
4187  	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4188  		return -EINVAL;
4189  
4190  	switch (attr->query.attach_type) {
4191  	case BPF_CGROUP_INET_INGRESS:
4192  	case BPF_CGROUP_INET_EGRESS:
4193  	case BPF_CGROUP_INET_SOCK_CREATE:
4194  	case BPF_CGROUP_INET_SOCK_RELEASE:
4195  	case BPF_CGROUP_INET4_BIND:
4196  	case BPF_CGROUP_INET6_BIND:
4197  	case BPF_CGROUP_INET4_POST_BIND:
4198  	case BPF_CGROUP_INET6_POST_BIND:
4199  	case BPF_CGROUP_INET4_CONNECT:
4200  	case BPF_CGROUP_INET6_CONNECT:
4201  	case BPF_CGROUP_UNIX_CONNECT:
4202  	case BPF_CGROUP_INET4_GETPEERNAME:
4203  	case BPF_CGROUP_INET6_GETPEERNAME:
4204  	case BPF_CGROUP_UNIX_GETPEERNAME:
4205  	case BPF_CGROUP_INET4_GETSOCKNAME:
4206  	case BPF_CGROUP_INET6_GETSOCKNAME:
4207  	case BPF_CGROUP_UNIX_GETSOCKNAME:
4208  	case BPF_CGROUP_UDP4_SENDMSG:
4209  	case BPF_CGROUP_UDP6_SENDMSG:
4210  	case BPF_CGROUP_UNIX_SENDMSG:
4211  	case BPF_CGROUP_UDP4_RECVMSG:
4212  	case BPF_CGROUP_UDP6_RECVMSG:
4213  	case BPF_CGROUP_UNIX_RECVMSG:
4214  	case BPF_CGROUP_SOCK_OPS:
4215  	case BPF_CGROUP_DEVICE:
4216  	case BPF_CGROUP_SYSCTL:
4217  	case BPF_CGROUP_GETSOCKOPT:
4218  	case BPF_CGROUP_SETSOCKOPT:
4219  	case BPF_LSM_CGROUP:
4220  		return cgroup_bpf_prog_query(attr, uattr);
4221  	case BPF_LIRC_MODE2:
4222  		return lirc_prog_query(attr, uattr);
4223  	case BPF_FLOW_DISSECTOR:
4224  	case BPF_SK_LOOKUP:
4225  		return netns_bpf_prog_query(attr, uattr);
4226  	case BPF_SK_SKB_STREAM_PARSER:
4227  	case BPF_SK_SKB_STREAM_VERDICT:
4228  	case BPF_SK_MSG_VERDICT:
4229  	case BPF_SK_SKB_VERDICT:
4230  		return sock_map_bpf_prog_query(attr, uattr);
4231  	case BPF_TCX_INGRESS:
4232  	case BPF_TCX_EGRESS:
4233  		return tcx_prog_query(attr, uattr);
4234  	case BPF_NETKIT_PRIMARY:
4235  	case BPF_NETKIT_PEER:
4236  		return netkit_prog_query(attr, uattr);
4237  	default:
4238  		return -EINVAL;
4239  	}
4240  }
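
/*
 * Illustrative only (not part of this file): a user-space sketch of
 * BPF_PROG_QUERY as dispatched above, listing the program IDs attached to a
 * cgroup's ingress hook. The caller-supplied ids array and count are
 * assumptions for the example; on success *cnt holds the number of attached
 * programs.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int query_cgroup_ingress(int cgroup_fd, __u32 *ids, __u32 *cnt)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.query.target_fd = cgroup_fd;
 *		attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *		attr.query.prog_ids = (__u64)(unsigned long)ids;
 *		attr.query.prog_cnt = *cnt;
 *		err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *		if (!err)
 *			*cnt = attr.query.prog_cnt;
 *		return err;
 *	}
 */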
4241  
4242  #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4243  
4244  static int bpf_prog_test_run(const union bpf_attr *attr,
4245  			     union bpf_attr __user *uattr)
4246  {
4247  	struct bpf_prog *prog;
4248  	int ret = -ENOTSUPP;
4249  
4250  	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4251  		return -EINVAL;
4252  
4253  	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4254  	    (!attr->test.ctx_size_in && attr->test.ctx_in))
4255  		return -EINVAL;
4256  
4257  	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4258  	    (!attr->test.ctx_size_out && attr->test.ctx_out))
4259  		return -EINVAL;
4260  
4261  	prog = bpf_prog_get(attr->test.prog_fd);
4262  	if (IS_ERR(prog))
4263  		return PTR_ERR(prog);
4264  
4265  	if (prog->aux->ops->test_run)
4266  		ret = prog->aux->ops->test_run(prog, attr, uattr);
4267  
4268  	bpf_prog_put(prog);
4269  	return ret;
4270  }
4271  
4272  #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4273  
4274  static int bpf_obj_get_next_id(const union bpf_attr *attr,
4275  			       union bpf_attr __user *uattr,
4276  			       struct idr *idr,
4277  			       spinlock_t *lock)
4278  {
4279  	u32 next_id = attr->start_id;
4280  	int err = 0;
4281  
4282  	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4283  		return -EINVAL;
4284  
4285  	if (!capable(CAP_SYS_ADMIN))
4286  		return -EPERM;
4287  
4288  	next_id++;
4289  	spin_lock_bh(lock);
4290  	if (!idr_get_next(idr, &next_id))
4291  		err = -ENOENT;
4292  	spin_unlock_bh(lock);
4293  
4294  	if (!err)
4295  		err = put_user(next_id, &uattr->next_id);
4296  
4297  	return err;
4298  }
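
/*
 * Illustrative only (not part of this file): a user-space sketch combining
 * BPF_PROG_GET_NEXT_ID (served by bpf_obj_get_next_id() above) with
 * BPF_PROG_GET_FD_BY_ID to walk every loaded program. Both commands require
 * CAP_SYS_ADMIN; the callback is an assumption for the example.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static void for_each_prog_fd(void (*cb)(int fd))
 *	{
 *		union bpf_attr attr;
 *		__u32 id = 0;
 *		int fd;
 *
 *		for (;;) {
 *			memset(&attr, 0, sizeof(attr));
 *			attr.start_id = id;
 *			// fails with ENOENT once the prog IDR is exhausted
 *			if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *				break;
 *			id = attr.next_id;
 *			memset(&attr, 0, sizeof(attr));
 *			attr.prog_id = id;
 *			fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 *			if (fd >= 0) {
 *				cb(fd);
 *				close(fd);
 *			}
 *		}
 *	}
 */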
4299  
4300  struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4301  {
4302  	struct bpf_map *map;
4303  
4304  	spin_lock_bh(&map_idr_lock);
4305  again:
4306  	map = idr_get_next(&map_idr, id);
4307  	if (map) {
4308  		map = __bpf_map_inc_not_zero(map, false);
4309  		if (IS_ERR(map)) {
4310  			(*id)++;
4311  			goto again;
4312  		}
4313  	}
4314  	spin_unlock_bh(&map_idr_lock);
4315  
4316  	return map;
4317  }
4318  
4319  struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4320  {
4321  	struct bpf_prog *prog;
4322  
4323  	spin_lock_bh(&prog_idr_lock);
4324  again:
4325  	prog = idr_get_next(&prog_idr, id);
4326  	if (prog) {
4327  		prog = bpf_prog_inc_not_zero(prog);
4328  		if (IS_ERR(prog)) {
4329  			(*id)++;
4330  			goto again;
4331  		}
4332  	}
4333  	spin_unlock_bh(&prog_idr_lock);
4334  
4335  	return prog;
4336  }
4337  
4338  #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4339  
4340  struct bpf_prog *bpf_prog_by_id(u32 id)
4341  {
4342  	struct bpf_prog *prog;
4343  
4344  	if (!id)
4345  		return ERR_PTR(-ENOENT);
4346  
4347  	spin_lock_bh(&prog_idr_lock);
4348  	prog = idr_find(&prog_idr, id);
4349  	if (prog)
4350  		prog = bpf_prog_inc_not_zero(prog);
4351  	else
4352  		prog = ERR_PTR(-ENOENT);
4353  	spin_unlock_bh(&prog_idr_lock);
4354  	return prog;
4355  }
4356  
4357  static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4358  {
4359  	struct bpf_prog *prog;
4360  	u32 id = attr->prog_id;
4361  	int fd;
4362  
4363  	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4364  		return -EINVAL;
4365  
4366  	if (!capable(CAP_SYS_ADMIN))
4367  		return -EPERM;
4368  
4369  	prog = bpf_prog_by_id(id);
4370  	if (IS_ERR(prog))
4371  		return PTR_ERR(prog);
4372  
4373  	fd = bpf_prog_new_fd(prog);
4374  	if (fd < 0)
4375  		bpf_prog_put(prog);
4376  
4377  	return fd;
4378  }
4379  
4380  #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4381  
4382  static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4383  {
4384  	struct bpf_map *map;
4385  	u32 id = attr->map_id;
4386  	int f_flags;
4387  	int fd;
4388  
4389  	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4390  	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4391  		return -EINVAL;
4392  
4393  	if (!capable(CAP_SYS_ADMIN))
4394  		return -EPERM;
4395  
4396  	f_flags = bpf_get_file_flag(attr->open_flags);
4397  	if (f_flags < 0)
4398  		return f_flags;
4399  
4400  	spin_lock_bh(&map_idr_lock);
4401  	map = idr_find(&map_idr, id);
4402  	if (map)
4403  		map = __bpf_map_inc_not_zero(map, true);
4404  	else
4405  		map = ERR_PTR(-ENOENT);
4406  	spin_unlock_bh(&map_idr_lock);
4407  
4408  	if (IS_ERR(map))
4409  		return PTR_ERR(map);
4410  
4411  	fd = bpf_map_new_fd(map, f_flags);
4412  	if (fd < 0)
4413  		bpf_map_put_with_uref(map);
4414  
4415  	return fd;
4416  }
4417  
4418  static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4419  					      unsigned long addr, u32 *off,
4420  					      u32 *type)
4421  {
4422  	const struct bpf_map *map;
4423  	int i;
4424  
4425  	mutex_lock(&prog->aux->used_maps_mutex);
4426  	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4427  		map = prog->aux->used_maps[i];
4428  		if (map == (void *)addr) {
4429  			*type = BPF_PSEUDO_MAP_FD;
4430  			goto out;
4431  		}
4432  		if (!map->ops->map_direct_value_meta)
4433  			continue;
4434  		if (!map->ops->map_direct_value_meta(map, addr, off)) {
4435  			*type = BPF_PSEUDO_MAP_VALUE;
4436  			goto out;
4437  		}
4438  	}
4439  	map = NULL;
4440  
4441  out:
4442  	mutex_unlock(&prog->aux->used_maps_mutex);
4443  	return map;
4444  }
4445  
4446  static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4447  					      const struct cred *f_cred)
4448  {
4449  	const struct bpf_map *map;
4450  	struct bpf_insn *insns;
4451  	u32 off, type;
4452  	u64 imm;
4453  	u8 code;
4454  	int i;
4455  
4456  	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4457  			GFP_USER);
4458  	if (!insns)
4459  		return insns;
4460  
4461  	for (i = 0; i < prog->len; i++) {
4462  		code = insns[i].code;
4463  
4464  		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4465  			insns[i].code = BPF_JMP | BPF_CALL;
4466  			insns[i].imm = BPF_FUNC_tail_call;
4467  			/* fall-through */
4468  		}
4469  		if (code == (BPF_JMP | BPF_CALL) ||
4470  		    code == (BPF_JMP | BPF_CALL_ARGS)) {
4471  			if (code == (BPF_JMP | BPF_CALL_ARGS))
4472  				insns[i].code = BPF_JMP | BPF_CALL;
4473  			if (!bpf_dump_raw_ok(f_cred))
4474  				insns[i].imm = 0;
4475  			continue;
4476  		}
4477  		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4478  			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4479  			continue;
4480  		}
4481  
4482  		if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
4483  		     BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
4484  			insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
4485  			continue;
4486  		}
4487  
4488  		if (code != (BPF_LD | BPF_IMM | BPF_DW))
4489  			continue;
4490  
4491  		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4492  		map = bpf_map_from_imm(prog, imm, &off, &type);
4493  		if (map) {
4494  			insns[i].src_reg = type;
4495  			insns[i].imm = map->id;
4496  			insns[i + 1].imm = off;
4497  			continue;
4498  		}
4499  	}
4500  
4501  	return insns;
4502  }
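
/*
 * Sketch of the rewrite above, with illustrative values: a BPF_LD | BPF_IMM |
 * BPF_DW pair that carried a raw map pointer at run time, i.e.
 *
 *	insn[i].imm     = lower 32 bits of the map address
 *	insn[i + 1].imm = upper 32 bits of the map address
 *
 * is dumped to user space as
 *
 *	insn[i].src_reg = BPF_PSEUDO_MAP_FD (or BPF_PSEUDO_MAP_VALUE)
 *	insn[i].imm     = map->id
 *	insn[i + 1].imm = value offset (0 for BPF_PSEUDO_MAP_FD)
 *
 * so the dump exposes stable map IDs rather than kernel addresses.
 */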
4503  
4504  static int set_info_rec_size(struct bpf_prog_info *info)
4505  {
4506  	/*
4507  	 * Ensure info.*_rec_size is the same as the size the kernel expects,
4508  	 *
4509  	 * or
4510  	 *
4511  	 * only allow a zero *_rec_size if both _rec_size and _cnt are
4512  	 * zero.  In this case, the kernel will write the expected
4513  	 * _rec_size back into the info.
4514  	 */
4515  
4516  	if ((info->nr_func_info || info->func_info_rec_size) &&
4517  	    info->func_info_rec_size != sizeof(struct bpf_func_info))
4518  		return -EINVAL;
4519  
4520  	if ((info->nr_line_info || info->line_info_rec_size) &&
4521  	    info->line_info_rec_size != sizeof(struct bpf_line_info))
4522  		return -EINVAL;
4523  
4524  	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4525  	    info->jited_line_info_rec_size != sizeof(__u64))
4526  		return -EINVAL;
4527  
4528  	info->func_info_rec_size = sizeof(struct bpf_func_info);
4529  	info->line_info_rec_size = sizeof(struct bpf_line_info);
4530  	info->jited_line_info_rec_size = sizeof(__u64);
4531  
4532  	return 0;
4533  }
4534  
4535  static int bpf_prog_get_info_by_fd(struct file *file,
4536  				   struct bpf_prog *prog,
4537  				   const union bpf_attr *attr,
4538  				   union bpf_attr __user *uattr)
4539  {
4540  	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4541  	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4542  	struct bpf_prog_info info;
4543  	u32 info_len = attr->info.info_len;
4544  	struct bpf_prog_kstats stats;
4545  	char __user *uinsns;
4546  	u32 ulen;
4547  	int err;
4548  
4549  	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4550  	if (err)
4551  		return err;
4552  	info_len = min_t(u32, sizeof(info), info_len);
4553  
4554  	memset(&info, 0, sizeof(info));
4555  	if (copy_from_user(&info, uinfo, info_len))
4556  		return -EFAULT;
4557  
4558  	info.type = prog->type;
4559  	info.id = prog->aux->id;
4560  	info.load_time = prog->aux->load_time;
4561  	info.created_by_uid = from_kuid_munged(current_user_ns(),
4562  					       prog->aux->user->uid);
4563  	info.gpl_compatible = prog->gpl_compatible;
4564  
4565  	memcpy(info.tag, prog->tag, sizeof(prog->tag));
4566  	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4567  
4568  	mutex_lock(&prog->aux->used_maps_mutex);
4569  	ulen = info.nr_map_ids;
4570  	info.nr_map_ids = prog->aux->used_map_cnt;
4571  	ulen = min_t(u32, info.nr_map_ids, ulen);
4572  	if (ulen) {
4573  		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4574  		u32 i;
4575  
4576  		for (i = 0; i < ulen; i++)
4577  			if (put_user(prog->aux->used_maps[i]->id,
4578  				     &user_map_ids[i])) {
4579  				mutex_unlock(&prog->aux->used_maps_mutex);
4580  				return -EFAULT;
4581  			}
4582  	}
4583  	mutex_unlock(&prog->aux->used_maps_mutex);
4584  
4585  	err = set_info_rec_size(&info);
4586  	if (err)
4587  		return err;
4588  
4589  	bpf_prog_get_stats(prog, &stats);
4590  	info.run_time_ns = stats.nsecs;
4591  	info.run_cnt = stats.cnt;
4592  	info.recursion_misses = stats.misses;
4593  
4594  	info.verified_insns = prog->aux->verified_insns;
4595  
4596  	if (!bpf_capable()) {
4597  		info.jited_prog_len = 0;
4598  		info.xlated_prog_len = 0;
4599  		info.nr_jited_ksyms = 0;
4600  		info.nr_jited_func_lens = 0;
4601  		info.nr_func_info = 0;
4602  		info.nr_line_info = 0;
4603  		info.nr_jited_line_info = 0;
4604  		goto done;
4605  	}
4606  
4607  	ulen = info.xlated_prog_len;
4608  	info.xlated_prog_len = bpf_prog_insn_size(prog);
4609  	if (info.xlated_prog_len && ulen) {
4610  		struct bpf_insn *insns_sanitized;
4611  		bool fault;
4612  
4613  		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4614  			info.xlated_prog_insns = 0;
4615  			goto done;
4616  		}
4617  		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4618  		if (!insns_sanitized)
4619  			return -ENOMEM;
4620  		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4621  		ulen = min_t(u32, info.xlated_prog_len, ulen);
4622  		fault = copy_to_user(uinsns, insns_sanitized, ulen);
4623  		kfree(insns_sanitized);
4624  		if (fault)
4625  			return -EFAULT;
4626  	}
4627  
4628  	if (bpf_prog_is_offloaded(prog->aux)) {
4629  		err = bpf_prog_offload_info_fill(&info, prog);
4630  		if (err)
4631  			return err;
4632  		goto done;
4633  	}
4634  
4635  	/* NOTE: the following code is supposed to be skipped for offload.
4636  	 * bpf_prog_offload_info_fill() is the place to fill similar fields
4637  	 * for offload.
4638  	 */
4639  	ulen = info.jited_prog_len;
4640  	if (prog->aux->func_cnt) {
4641  		u32 i;
4642  
4643  		info.jited_prog_len = 0;
4644  		for (i = 0; i < prog->aux->func_cnt; i++)
4645  			info.jited_prog_len += prog->aux->func[i]->jited_len;
4646  	} else {
4647  		info.jited_prog_len = prog->jited_len;
4648  	}
4649  
4650  	if (info.jited_prog_len && ulen) {
4651  		if (bpf_dump_raw_ok(file->f_cred)) {
4652  			uinsns = u64_to_user_ptr(info.jited_prog_insns);
4653  			ulen = min_t(u32, info.jited_prog_len, ulen);
4654  
4655  			/* for multi-function programs, copy the JITed
4656  			 * instructions for all the functions
4657  			 */
4658  			if (prog->aux->func_cnt) {
4659  				u32 len, free, i;
4660  				u8 *img;
4661  
4662  				free = ulen;
4663  				for (i = 0; i < prog->aux->func_cnt; i++) {
4664  					len = prog->aux->func[i]->jited_len;
4665  					len = min_t(u32, len, free);
4666  					img = (u8 *) prog->aux->func[i]->bpf_func;
4667  					if (copy_to_user(uinsns, img, len))
4668  						return -EFAULT;
4669  					uinsns += len;
4670  					free -= len;
4671  					if (!free)
4672  						break;
4673  				}
4674  			} else {
4675  				if (copy_to_user(uinsns, prog->bpf_func, ulen))
4676  					return -EFAULT;
4677  			}
4678  		} else {
4679  			info.jited_prog_insns = 0;
4680  		}
4681  	}
4682  
4683  	ulen = info.nr_jited_ksyms;
4684  	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4685  	if (ulen) {
4686  		if (bpf_dump_raw_ok(file->f_cred)) {
4687  			unsigned long ksym_addr;
4688  			u64 __user *user_ksyms;
4689  			u32 i;
4690  
4691  			/* copy the address of the kernel symbol
4692  			 * corresponding to each function
4693  			 */
4694  			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4695  			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4696  			if (prog->aux->func_cnt) {
4697  				for (i = 0; i < ulen; i++) {
4698  					ksym_addr = (unsigned long)
4699  						prog->aux->func[i]->bpf_func;
4700  					if (put_user((u64) ksym_addr,
4701  						     &user_ksyms[i]))
4702  						return -EFAULT;
4703  				}
4704  			} else {
4705  				ksym_addr = (unsigned long) prog->bpf_func;
4706  				if (put_user((u64) ksym_addr, &user_ksyms[0]))
4707  					return -EFAULT;
4708  			}
4709  		} else {
4710  			info.jited_ksyms = 0;
4711  		}
4712  	}
4713  
4714  	ulen = info.nr_jited_func_lens;
4715  	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4716  	if (ulen) {
4717  		if (bpf_dump_raw_ok(file->f_cred)) {
4718  			u32 __user *user_lens;
4719  			u32 func_len, i;
4720  
4721  			/* copy the JITed image lengths for each function */
4722  			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4723  			user_lens = u64_to_user_ptr(info.jited_func_lens);
4724  			if (prog->aux->func_cnt) {
4725  				for (i = 0; i < ulen; i++) {
4726  					func_len =
4727  						prog->aux->func[i]->jited_len;
4728  					if (put_user(func_len, &user_lens[i]))
4729  						return -EFAULT;
4730  				}
4731  			} else {
4732  				func_len = prog->jited_len;
4733  				if (put_user(func_len, &user_lens[0]))
4734  					return -EFAULT;
4735  			}
4736  		} else {
4737  			info.jited_func_lens = 0;
4738  		}
4739  	}
4740  
4741  	if (prog->aux->btf)
4742  		info.btf_id = btf_obj_id(prog->aux->btf);
4743  	info.attach_btf_id = prog->aux->attach_btf_id;
4744  	if (attach_btf)
4745  		info.attach_btf_obj_id = btf_obj_id(attach_btf);
4746  
4747  	ulen = info.nr_func_info;
4748  	info.nr_func_info = prog->aux->func_info_cnt;
4749  	if (info.nr_func_info && ulen) {
4750  		char __user *user_finfo;
4751  
4752  		user_finfo = u64_to_user_ptr(info.func_info);
4753  		ulen = min_t(u32, info.nr_func_info, ulen);
4754  		if (copy_to_user(user_finfo, prog->aux->func_info,
4755  				 info.func_info_rec_size * ulen))
4756  			return -EFAULT;
4757  	}
4758  
4759  	ulen = info.nr_line_info;
4760  	info.nr_line_info = prog->aux->nr_linfo;
4761  	if (info.nr_line_info && ulen) {
4762  		__u8 __user *user_linfo;
4763  
4764  		user_linfo = u64_to_user_ptr(info.line_info);
4765  		ulen = min_t(u32, info.nr_line_info, ulen);
4766  		if (copy_to_user(user_linfo, prog->aux->linfo,
4767  				 info.line_info_rec_size * ulen))
4768  			return -EFAULT;
4769  	}
4770  
4771  	ulen = info.nr_jited_line_info;
4772  	if (prog->aux->jited_linfo)
4773  		info.nr_jited_line_info = prog->aux->nr_linfo;
4774  	else
4775  		info.nr_jited_line_info = 0;
4776  	if (info.nr_jited_line_info && ulen) {
4777  		if (bpf_dump_raw_ok(file->f_cred)) {
4778  			unsigned long line_addr;
4779  			__u64 __user *user_linfo;
4780  			u32 i;
4781  
4782  			user_linfo = u64_to_user_ptr(info.jited_line_info);
4783  			ulen = min_t(u32, info.nr_jited_line_info, ulen);
4784  			for (i = 0; i < ulen; i++) {
4785  				line_addr = (unsigned long)prog->aux->jited_linfo[i];
4786  				if (put_user((__u64)line_addr, &user_linfo[i]))
4787  					return -EFAULT;
4788  			}
4789  		} else {
4790  			info.jited_line_info = 0;
4791  		}
4792  	}
4793  
4794  	ulen = info.nr_prog_tags;
4795  	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4796  	if (ulen) {
4797  		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4798  		u32 i;
4799  
4800  		user_prog_tags = u64_to_user_ptr(info.prog_tags);
4801  		ulen = min_t(u32, info.nr_prog_tags, ulen);
4802  		if (prog->aux->func_cnt) {
4803  			for (i = 0; i < ulen; i++) {
4804  				if (copy_to_user(user_prog_tags[i],
4805  						 prog->aux->func[i]->tag,
4806  						 BPF_TAG_SIZE))
4807  					return -EFAULT;
4808  			}
4809  		} else {
4810  			if (copy_to_user(user_prog_tags[0],
4811  					 prog->tag, BPF_TAG_SIZE))
4812  				return -EFAULT;
4813  		}
4814  	}
4815  
4816  done:
4817  	if (copy_to_user(uinfo, &info, info_len) ||
4818  	    put_user(info_len, &uattr->info.info_len))
4819  		return -EFAULT;
4820  
4821  	return 0;
4822  }
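
/*
 * Illustrative only (not part of this file): a minimal user-space sketch of
 * BPF_OBJ_GET_INFO_BY_FD against a program FD, filled in by
 * bpf_prog_get_info_by_fd() above. All array fields (map_ids, xlated/jited
 * instructions, ...) are left at zero, so only the fixed-size part of
 * struct bpf_prog_info is returned.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int prog_info(int prog_fd, struct bpf_prog_info *info)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(info, 0, sizeof(*info));
 *		memset(&attr, 0, sizeof(attr));
 *		attr.info.bpf_fd = prog_fd;
 *		attr.info.info_len = sizeof(*info);
 *		attr.info.info = (__u64)(unsigned long)info;
 *		return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	}
 */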
4823  
4824  static int bpf_map_get_info_by_fd(struct file *file,
4825  				  struct bpf_map *map,
4826  				  const union bpf_attr *attr,
4827  				  union bpf_attr __user *uattr)
4828  {
4829  	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4830  	struct bpf_map_info info;
4831  	u32 info_len = attr->info.info_len;
4832  	int err;
4833  
4834  	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4835  	if (err)
4836  		return err;
4837  	info_len = min_t(u32, sizeof(info), info_len);
4838  
4839  	memset(&info, 0, sizeof(info));
4840  	info.type = map->map_type;
4841  	info.id = map->id;
4842  	info.key_size = map->key_size;
4843  	info.value_size = map->value_size;
4844  	info.max_entries = map->max_entries;
4845  	info.map_flags = map->map_flags;
4846  	info.map_extra = map->map_extra;
4847  	memcpy(info.name, map->name, sizeof(map->name));
4848  
4849  	if (map->btf) {
4850  		info.btf_id = btf_obj_id(map->btf);
4851  		info.btf_key_type_id = map->btf_key_type_id;
4852  		info.btf_value_type_id = map->btf_value_type_id;
4853  	}
4854  	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4855  	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
4856  		bpf_map_struct_ops_info_fill(&info, map);
4857  
4858  	if (bpf_map_is_offloaded(map)) {
4859  		err = bpf_map_offload_info_fill(&info, map);
4860  		if (err)
4861  			return err;
4862  	}
4863  
4864  	if (copy_to_user(uinfo, &info, info_len) ||
4865  	    put_user(info_len, &uattr->info.info_len))
4866  		return -EFAULT;
4867  
4868  	return 0;
4869  }
4870  
4871  static int bpf_btf_get_info_by_fd(struct file *file,
4872  				  struct btf *btf,
4873  				  const union bpf_attr *attr,
4874  				  union bpf_attr __user *uattr)
4875  {
4876  	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4877  	u32 info_len = attr->info.info_len;
4878  	int err;
4879  
4880  	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4881  	if (err)
4882  		return err;
4883  
4884  	return btf_get_info_by_fd(btf, attr, uattr);
4885  }
4886  
4887  static int bpf_link_get_info_by_fd(struct file *file,
4888  				  struct bpf_link *link,
4889  				  const union bpf_attr *attr,
4890  				  union bpf_attr __user *uattr)
4891  {
4892  	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4893  	struct bpf_link_info info;
4894  	u32 info_len = attr->info.info_len;
4895  	int err;
4896  
4897  	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4898  	if (err)
4899  		return err;
4900  	info_len = min_t(u32, sizeof(info), info_len);
4901  
4902  	memset(&info, 0, sizeof(info));
4903  	if (copy_from_user(&info, uinfo, info_len))
4904  		return -EFAULT;
4905  
4906  	info.type = link->type;
4907  	info.id = link->id;
4908  	if (link->prog)
4909  		info.prog_id = link->prog->aux->id;
4910  
4911  	if (link->ops->fill_link_info) {
4912  		err = link->ops->fill_link_info(link, &info);
4913  		if (err)
4914  			return err;
4915  	}
4916  
4917  	if (copy_to_user(uinfo, &info, info_len) ||
4918  	    put_user(info_len, &uattr->info.info_len))
4919  		return -EFAULT;
4920  
4921  	return 0;
4922  }
4923  
4924  
4925  #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4926  
4927  static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4928  				  union bpf_attr __user *uattr)
4929  {
4930  	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4931  		return -EINVAL;
4932  
4933  	CLASS(fd, f)(attr->info.bpf_fd);
4934  	if (fd_empty(f))
4935  		return -EBADFD;
4936  
4937  	if (fd_file(f)->f_op == &bpf_prog_fops)
4938  		return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
4939  					      uattr);
4940  	else if (fd_file(f)->f_op == &bpf_map_fops)
4941  		return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
4942  					     uattr);
4943  	else if (fd_file(f)->f_op == &btf_fops)
4944  		return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
4945  	else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
4946  		return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
4947  					      attr, uattr);
4948  	return -EINVAL;
4949  }
4950  
4951  #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
4952  
4953  static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
4954  {
4955  	struct bpf_token *token = NULL;
4956  
4957  	if (CHECK_ATTR(BPF_BTF_LOAD))
4958  		return -EINVAL;
4959  
4960  	if (attr->btf_flags & ~BPF_F_TOKEN_FD)
4961  		return -EINVAL;
4962  
4963  	if (attr->btf_flags & BPF_F_TOKEN_FD) {
4964  		token = bpf_token_get_from_fd(attr->btf_token_fd);
4965  		if (IS_ERR(token))
4966  			return PTR_ERR(token);
4967  		if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
4968  			bpf_token_put(token);
4969  			token = NULL;
4970  		}
4971  	}
4972  
4973  	if (!bpf_token_capable(token, CAP_BPF)) {
4974  		bpf_token_put(token);
4975  		return -EPERM;
4976  	}
4977  
4978  	bpf_token_put(token);
4979  
4980  	return btf_new_fd(attr, uattr, uattr_size);
4981  }
4982  
4983  #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
4984  
4985  static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
4986  {
4987  	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4988  		return -EINVAL;
4989  
4990  	if (!capable(CAP_SYS_ADMIN))
4991  		return -EPERM;
4992  
4993  	return btf_get_fd_by_id(attr->btf_id);
4994  }
4995  
4996  static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4997  				    union bpf_attr __user *uattr,
4998  				    u32 prog_id, u32 fd_type,
4999  				    const char *buf, u64 probe_offset,
5000  				    u64 probe_addr)
5001  {
5002  	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
5003  	u32 len = buf ? strlen(buf) : 0, input_len;
5004  	int err = 0;
5005  
5006  	if (put_user(len, &uattr->task_fd_query.buf_len))
5007  		return -EFAULT;
5008  	input_len = attr->task_fd_query.buf_len;
5009  	if (input_len && ubuf) {
5010  		if (!len) {
5011  			/* nothing to copy, just make ubuf NULL terminated */
5012  			char zero = '\0';
5013  
5014  			if (put_user(zero, ubuf))
5015  				return -EFAULT;
5016  		} else if (input_len >= len + 1) {
5017  			/* ubuf can hold the string with NULL terminator */
5018  			if (copy_to_user(ubuf, buf, len + 1))
5019  				return -EFAULT;
5020  		} else {
5021  			/* ubuf cannot hold the string with NULL terminator,
5022  			 * do a partial copy with NULL terminator.
5023  			 */
5024  			char zero = '\0';
5025  
5026  			err = -ENOSPC;
5027  			if (copy_to_user(ubuf, buf, input_len - 1))
5028  				return -EFAULT;
5029  			if (put_user(zero, ubuf + input_len - 1))
5030  				return -EFAULT;
5031  		}
5032  	}
5033  
5034  	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
5035  	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
5036  	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
5037  	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
5038  		return -EFAULT;
5039  
5040  	return err;
5041  }
5042  
5043  #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
5044  
5045  static int bpf_task_fd_query(const union bpf_attr *attr,
5046  			     union bpf_attr __user *uattr)
5047  {
5048  	pid_t pid = attr->task_fd_query.pid;
5049  	u32 fd = attr->task_fd_query.fd;
5050  	const struct perf_event *event;
5051  	struct task_struct *task;
5052  	struct file *file;
5053  	int err;
5054  
5055  	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
5056  		return -EINVAL;
5057  
5058  	if (!capable(CAP_SYS_ADMIN))
5059  		return -EPERM;
5060  
5061  	if (attr->task_fd_query.flags != 0)
5062  		return -EINVAL;
5063  
5064  	rcu_read_lock();
5065  	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
5066  	rcu_read_unlock();
5067  	if (!task)
5068  		return -ENOENT;
5069  
5070  	err = 0;
5071  	file = fget_task(task, fd);
5072  	put_task_struct(task);
5073  	if (!file)
5074  		return -EBADF;
5075  
5076  	if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
5077  		struct bpf_link *link = file->private_data;
5078  
5079  		if (link->ops == &bpf_raw_tp_link_lops) {
5080  			struct bpf_raw_tp_link *raw_tp =
5081  				container_of(link, struct bpf_raw_tp_link, link);
5082  			struct bpf_raw_event_map *btp = raw_tp->btp;
5083  
5084  			err = bpf_task_fd_query_copy(attr, uattr,
5085  						     raw_tp->link.prog->aux->id,
5086  						     BPF_FD_TYPE_RAW_TRACEPOINT,
5087  						     btp->tp->name, 0, 0);
5088  			goto put_file;
5089  		}
5090  		goto out_not_supp;
5091  	}
5092  
5093  	event = perf_get_event(file);
5094  	if (!IS_ERR(event)) {
5095  		u64 probe_offset, probe_addr;
5096  		u32 prog_id, fd_type;
5097  		const char *buf;
5098  
5099  		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
5100  					      &buf, &probe_offset,
5101  					      &probe_addr, NULL);
5102  		if (!err)
5103  			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
5104  						     fd_type, buf,
5105  						     probe_offset,
5106  						     probe_addr);
5107  		goto put_file;
5108  	}
5109  
5110  out_not_supp:
5111  	err = -ENOTSUPP;
5112  put_file:
5113  	fput(file);
5114  	return err;
5115  }
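
/*
 * Illustrative only (not part of this file): a user-space sketch of
 * BPF_TASK_FD_QUERY as handled above, asking which BPF program and probe a
 * given (pid, fd) pair refers to; requires CAP_SYS_ADMIN. The buf/buf_len
 * output buffer receives the tracepoint or probed symbol name.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int task_fd_query(int pid, int fd, char *buf, __u32 buf_len,
 *				 __u32 *prog_id, __u32 *fd_type)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.task_fd_query.pid = pid;
 *		attr.task_fd_query.fd = fd;
 *		attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *		attr.task_fd_query.buf_len = buf_len;
 *		err = syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *		if (!err) {
 *			*prog_id = attr.task_fd_query.prog_id;
 *			*fd_type = attr.task_fd_query.fd_type;
 *		}
 *		return err;
 *	}
 */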
5116  
5117  #define BPF_MAP_BATCH_LAST_FIELD batch.flags
5118  
5119  #define BPF_DO_BATCH(fn, ...)			\
5120  	do {					\
5121  		if (!fn) {			\
5122  			err = -ENOTSUPP;	\
5123  			goto err_put;		\
5124  		}				\
5125  		err = fn(__VA_ARGS__);		\
5126  	} while (0)
5127  
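/*
 * Common handler for the four map batch commands.  Mutating commands are
 * bracketed by bpf_map_write_active_inc()/_dec() and wait for any pending
 * BPF programs before returning.
 */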
5128  static int bpf_map_do_batch(const union bpf_attr *attr,
5129  			    union bpf_attr __user *uattr,
5130  			    int cmd)
5131  {
5132  	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
5133  			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
5134  	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
5135  	struct bpf_map *map;
5136  	int err;
5137  
5138  	if (CHECK_ATTR(BPF_MAP_BATCH))
5139  		return -EINVAL;
5140  
5141  	CLASS(fd, f)(attr->batch.map_fd);
5142  
5143  	map = __bpf_map_get(f);
5144  	if (IS_ERR(map))
5145  		return PTR_ERR(map);
5146  	if (has_write)
5147  		bpf_map_write_active_inc(map);
5148  	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
5149  		err = -EPERM;
5150  		goto err_put;
5151  	}
5152  	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
5153  		err = -EPERM;
5154  		goto err_put;
5155  	}
5156  
5157  	if (cmd == BPF_MAP_LOOKUP_BATCH)
5158  		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
5159  	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
5160  		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
5161  	else if (cmd == BPF_MAP_UPDATE_BATCH)
5162  		BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
5163  	else
5164  		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
5165  err_put:
5166  	if (has_write) {
5167  		maybe_wait_bpf_programs(map);
5168  		bpf_map_write_active_dec(map);
5169  	}
5170  	return err;
5171  }
5172  
5173  #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
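/*
 * Attach @prog to the target described by attr->link_create and wrap the
 * attachment in a bpf_link.  On success the link takes over the program
 * reference; it is only dropped here on error.
 */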
5174  static int link_create(union bpf_attr *attr, bpfptr_t uattr)
5175  {
5176  	struct bpf_prog *prog;
5177  	int ret;
5178  
5179  	if (CHECK_ATTR(BPF_LINK_CREATE))
5180  		return -EINVAL;
5181  
5182  	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5183  		return bpf_struct_ops_link_create(attr);
5184  
5185  	prog = bpf_prog_get(attr->link_create.prog_fd);
5186  	if (IS_ERR(prog))
5187  		return PTR_ERR(prog);
5188  
5189  	ret = bpf_prog_attach_check_attach_type(prog,
5190  						attr->link_create.attach_type);
5191  	if (ret)
5192  		goto out;
5193  
5194  	switch (prog->type) {
5195  	case BPF_PROG_TYPE_CGROUP_SKB:
5196  	case BPF_PROG_TYPE_CGROUP_SOCK:
5197  	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5198  	case BPF_PROG_TYPE_SOCK_OPS:
5199  	case BPF_PROG_TYPE_CGROUP_DEVICE:
5200  	case BPF_PROG_TYPE_CGROUP_SYSCTL:
5201  	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5202  		ret = cgroup_bpf_link_attach(attr, prog);
5203  		break;
5204  	case BPF_PROG_TYPE_EXT:
5205  		ret = bpf_tracing_prog_attach(prog,
5206  					      attr->link_create.target_fd,
5207  					      attr->link_create.target_btf_id,
5208  					      attr->link_create.tracing.cookie);
5209  		break;
5210  	case BPF_PROG_TYPE_LSM:
5211  	case BPF_PROG_TYPE_TRACING:
5212  		if (attr->link_create.attach_type != prog->expected_attach_type) {
5213  			ret = -EINVAL;
5214  			goto out;
5215  		}
5216  		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5217  			ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie);
5218  		else if (prog->expected_attach_type == BPF_TRACE_ITER)
5219  			ret = bpf_iter_link_attach(attr, uattr, prog);
5220  		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5221  			ret = cgroup_bpf_link_attach(attr, prog);
5222  		else
5223  			ret = bpf_tracing_prog_attach(prog,
5224  						      attr->link_create.target_fd,
5225  						      attr->link_create.target_btf_id,
5226  						      attr->link_create.tracing.cookie);
5227  		break;
5228  	case BPF_PROG_TYPE_FLOW_DISSECTOR:
5229  	case BPF_PROG_TYPE_SK_LOOKUP:
5230  		ret = netns_bpf_link_create(attr, prog);
5231  		break;
5232  	case BPF_PROG_TYPE_SK_MSG:
5233  	case BPF_PROG_TYPE_SK_SKB:
5234  		ret = sock_map_link_create(attr, prog);
5235  		break;
5236  #ifdef CONFIG_NET
5237  	case BPF_PROG_TYPE_XDP:
5238  		ret = bpf_xdp_link_attach(attr, prog);
5239  		break;
5240  	case BPF_PROG_TYPE_SCHED_CLS:
5241  		if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5242  		    attr->link_create.attach_type == BPF_TCX_EGRESS)
5243  			ret = tcx_link_attach(attr, prog);
5244  		else
5245  			ret = netkit_link_attach(attr, prog);
5246  		break;
5247  	case BPF_PROG_TYPE_NETFILTER:
5248  		ret = bpf_nf_link_attach(attr, prog);
5249  		break;
5250  #endif
5251  	case BPF_PROG_TYPE_PERF_EVENT:
5252  	case BPF_PROG_TYPE_TRACEPOINT:
5253  		ret = bpf_perf_link_attach(attr, prog);
5254  		break;
5255  	case BPF_PROG_TYPE_KPROBE:
5256  		if (attr->link_create.attach_type == BPF_PERF_EVENT)
5257  			ret = bpf_perf_link_attach(attr, prog);
5258  		else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
5259  			 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
5260  			ret = bpf_kprobe_multi_link_attach(attr, prog);
5261  		else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
5262  			ret = bpf_uprobe_multi_link_attach(attr, prog);
5263  		break;
5264  	default:
5265  		ret = -EINVAL;
5266  	}
5267  
5268  out:
5269  	if (ret < 0)
5270  		bpf_prog_put(prog);
5271  	return ret;
5272  }
5273  
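/*
 * Update a link that references a map rather than a program (links providing
 * the update_map op).  old_map_fd may only be supplied together with
 * BPF_F_REPLACE.
 */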
5274  static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5275  {
5276  	struct bpf_map *new_map, *old_map = NULL;
5277  	int ret;
5278  
5279  	new_map = bpf_map_get(attr->link_update.new_map_fd);
5280  	if (IS_ERR(new_map))
5281  		return PTR_ERR(new_map);
5282  
5283  	if (attr->link_update.flags & BPF_F_REPLACE) {
5284  		old_map = bpf_map_get(attr->link_update.old_map_fd);
5285  		if (IS_ERR(old_map)) {
5286  			ret = PTR_ERR(old_map);
5287  			goto out_put;
5288  		}
5289  	} else if (attr->link_update.old_map_fd) {
5290  		ret = -EINVAL;
5291  		goto out_put;
5292  	}
5293  
5294  	ret = link->ops->update_map(link, new_map, old_map);
5295  
5296  	if (old_map)
5297  		bpf_map_put(old_map);
5298  out_put:
5299  	bpf_map_put(new_map);
5300  	return ret;
5301  }
5302  
5303  #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5304  
5305  static int link_update(union bpf_attr *attr)
5306  {
5307  	struct bpf_prog *old_prog = NULL, *new_prog;
5308  	struct bpf_link *link;
5309  	u32 flags;
5310  	int ret;
5311  
5312  	if (CHECK_ATTR(BPF_LINK_UPDATE))
5313  		return -EINVAL;
5314  
5315  	flags = attr->link_update.flags;
5316  	if (flags & ~BPF_F_REPLACE)
5317  		return -EINVAL;
5318  
5319  	link = bpf_link_get_from_fd(attr->link_update.link_fd);
5320  	if (IS_ERR(link))
5321  		return PTR_ERR(link);
5322  
5323  	if (link->ops->update_map) {
5324  		ret = link_update_map(link, attr);
5325  		goto out_put_link;
5326  	}
5327  
5328  	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5329  	if (IS_ERR(new_prog)) {
5330  		ret = PTR_ERR(new_prog);
5331  		goto out_put_link;
5332  	}
5333  
5334  	if (flags & BPF_F_REPLACE) {
5335  		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5336  		if (IS_ERR(old_prog)) {
5337  			ret = PTR_ERR(old_prog);
5338  			old_prog = NULL;
5339  			goto out_put_progs;
5340  		}
5341  	} else if (attr->link_update.old_prog_fd) {
5342  		ret = -EINVAL;
5343  		goto out_put_progs;
5344  	}
5345  
5346  	if (link->ops->update_prog)
5347  		ret = link->ops->update_prog(link, new_prog, old_prog);
5348  	else
5349  		ret = -EINVAL;
5350  
5351  out_put_progs:
5352  	if (old_prog)
5353  		bpf_prog_put(old_prog);
5354  	if (ret)
5355  		bpf_prog_put(new_prog);
5356  out_put_link:
5357  	bpf_link_put_direct(link);
5358  	return ret;
5359  }
5360  
5361  #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5362  
5363  static int link_detach(union bpf_attr *attr)
5364  {
5365  	struct bpf_link *link;
5366  	int ret;
5367  
5368  	if (CHECK_ATTR(BPF_LINK_DETACH))
5369  		return -EINVAL;
5370  
5371  	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5372  	if (IS_ERR(link))
5373  		return PTR_ERR(link);
5374  
5375  	if (link->ops->detach)
5376  		ret = link->ops->detach(link);
5377  	else
5378  		ret = -EOPNOTSUPP;
5379  
5380  	bpf_link_put_direct(link);
5381  	return ret;
5382  }
5383  
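/* Take a reference on @link unless its refcount has already dropped to zero. */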
5384  struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5385  {
5386  	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5387  }
5388  EXPORT_SYMBOL(bpf_link_inc_not_zero);
5389  
5390  struct bpf_link *bpf_link_by_id(u32 id)
5391  {
5392  	struct bpf_link *link;
5393  
5394  	if (!id)
5395  		return ERR_PTR(-ENOENT);
5396  
5397  	spin_lock_bh(&link_idr_lock);
5398  	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
5399  	link = idr_find(&link_idr, id);
5400  	if (link) {
5401  		if (link->id)
5402  			link = bpf_link_inc_not_zero(link);
5403  		else
5404  			link = ERR_PTR(-EAGAIN);
5405  	} else {
5406  		link = ERR_PTR(-ENOENT);
5407  	}
5408  	spin_unlock_bh(&link_idr_lock);
5409  	return link;
5410  }
5411  
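/*
 * Return the link with the lowest ID that is >= *id and can still be
 * referenced, advancing *id past any link that is already being destroyed.
 */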
5412  struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5413  {
5414  	struct bpf_link *link;
5415  
5416  	spin_lock_bh(&link_idr_lock);
5417  again:
5418  	link = idr_get_next(&link_idr, id);
5419  	if (link) {
5420  		link = bpf_link_inc_not_zero(link);
5421  		if (IS_ERR(link)) {
5422  			(*id)++;
5423  			goto again;
5424  		}
5425  	}
5426  	spin_unlock_bh(&link_idr_lock);
5427  
5428  	return link;
5429  }
5430  
5431  #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5432  
5433  static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5434  {
5435  	struct bpf_link *link;
5436  	u32 id = attr->link_id;
5437  	int fd;
5438  
5439  	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5440  		return -EINVAL;
5441  
5442  	if (!capable(CAP_SYS_ADMIN))
5443  		return -EPERM;
5444  
5445  	link = bpf_link_by_id(id);
5446  	if (IS_ERR(link))
5447  		return PTR_ERR(link);
5448  
5449  	fd = bpf_link_new_fd(link);
5450  	if (fd < 0)
5451  		bpf_link_put_direct(link);
5452  
5453  	return fd;
5454  }
5455  
5456  DEFINE_MUTEX(bpf_stats_enabled_mutex);
5457  
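/*
 * Runtime stats stay enabled for as long as the anon fd handed out by
 * BPF_ENABLE_STATS is kept open; closing it drops the static key again.
 */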
5458  static int bpf_stats_release(struct inode *inode, struct file *file)
5459  {
5460  	mutex_lock(&bpf_stats_enabled_mutex);
5461  	static_key_slow_dec(&bpf_stats_enabled_key.key);
5462  	mutex_unlock(&bpf_stats_enabled_mutex);
5463  	return 0;
5464  }
5465  
5466  static const struct file_operations bpf_stats_fops = {
5467  	.release = bpf_stats_release,
5468  };
5469  
5470  static int bpf_enable_runtime_stats(void)
5471  {
5472  	int fd;
5473  
5474  	mutex_lock(&bpf_stats_enabled_mutex);
5475  
5476  	/* Set a very high limit to avoid overflow */
5477  	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5478  		mutex_unlock(&bpf_stats_enabled_mutex);
5479  		return -EBUSY;
5480  	}
5481  
5482  	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5483  	if (fd >= 0)
5484  		static_key_slow_inc(&bpf_stats_enabled_key.key);
5485  
5486  	mutex_unlock(&bpf_stats_enabled_mutex);
5487  	return fd;
5488  }
5489  
5490  #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5491  
5492  static int bpf_enable_stats(union bpf_attr *attr)
5493  {
5494  
5495  	if (CHECK_ATTR(BPF_ENABLE_STATS))
5496  		return -EINVAL;
5497  
5498  	if (!capable(CAP_SYS_ADMIN))
5499  		return -EPERM;
5500  
5501  	switch (attr->enable_stats.type) {
5502  	case BPF_STATS_RUN_TIME:
5503  		return bpf_enable_runtime_stats();
5504  	default:
5505  		break;
5506  	}
5507  	return -EINVAL;
5508  }
5509  
5510  #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5511  
5512  static int bpf_iter_create(union bpf_attr *attr)
5513  {
5514  	struct bpf_link *link;
5515  	int err;
5516  
5517  	if (CHECK_ATTR(BPF_ITER_CREATE))
5518  		return -EINVAL;
5519  
5520  	if (attr->iter_create.flags)
5521  		return -EINVAL;
5522  
5523  	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5524  	if (IS_ERR(link))
5525  		return PTR_ERR(link);
5526  
5527  	err = bpf_iter_new_fd(link);
5528  	bpf_link_put_direct(link);
5529  
5530  	return err;
5531  }
5532  
5533  #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5534  
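/*
 * BPF_PROG_BIND_MAP: record an extra map in prog->aux->used_maps so its
 * lifetime is tied to the program even though the program never accesses it.
 * Binding the same map twice is a no-op.
 */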
5535  static int bpf_prog_bind_map(union bpf_attr *attr)
5536  {
5537  	struct bpf_prog *prog;
5538  	struct bpf_map *map;
5539  	struct bpf_map **used_maps_old, **used_maps_new;
5540  	int i, ret = 0;
5541  
5542  	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5543  		return -EINVAL;
5544  
5545  	if (attr->prog_bind_map.flags)
5546  		return -EINVAL;
5547  
5548  	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5549  	if (IS_ERR(prog))
5550  		return PTR_ERR(prog);
5551  
5552  	map = bpf_map_get(attr->prog_bind_map.map_fd);
5553  	if (IS_ERR(map)) {
5554  		ret = PTR_ERR(map);
5555  		goto out_prog_put;
5556  	}
5557  
5558  	mutex_lock(&prog->aux->used_maps_mutex);
5559  
5560  	used_maps_old = prog->aux->used_maps;
5561  
5562  	for (i = 0; i < prog->aux->used_map_cnt; i++)
5563  		if (used_maps_old[i] == map) {
5564  			bpf_map_put(map);
5565  			goto out_unlock;
5566  		}
5567  
5568  	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5569  				      sizeof(used_maps_new[0]),
5570  				      GFP_KERNEL);
5571  	if (!used_maps_new) {
5572  		ret = -ENOMEM;
5573  		goto out_unlock;
5574  	}
5575  
5576  	/* The bpf program will not access the bpf map, but for the sake of
5577  	 * simplicity, increase sleepable_refcnt for sleepable program as well.
5578  	 */
5579  	if (prog->sleepable)
5580  		atomic64_inc(&map->sleepable_refcnt);
5581  	memcpy(used_maps_new, used_maps_old,
5582  	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5583  	used_maps_new[prog->aux->used_map_cnt] = map;
5584  
5585  	prog->aux->used_map_cnt++;
5586  	prog->aux->used_maps = used_maps_new;
5587  
5588  	kfree(used_maps_old);
5589  
5590  out_unlock:
5591  	mutex_unlock(&prog->aux->used_maps_mutex);
5592  
5593  	if (ret)
5594  		bpf_map_put(map);
5595  out_prog_put:
5596  	bpf_prog_put(prog);
5597  	return ret;
5598  }
5599  
5600  #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
5601  
5602  static int token_create(union bpf_attr *attr)
5603  {
5604  	if (CHECK_ATTR(BPF_TOKEN_CREATE))
5605  		return -EINVAL;
5606  
5607  	/* no flags are supported yet */
5608  	if (attr->token_create.flags)
5609  		return -EINVAL;
5610  
5611  	return bpf_token_create(attr);
5612  }
5613  
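/*
 * Common entry point for all bpf(2) commands, shared by the syscall proper
 * (user-space attr pointer) and by BPF_PROG_TYPE_SYSCALL programs calling in
 * with a kernel-space attr via bpf_sys_bpf()/kern_sys_bpf().
 */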
5614  static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
5615  {
5616  	union bpf_attr attr;
5617  	int err;
5618  
5619  	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5620  	if (err)
5621  		return err;
5622  	size = min_t(u32, size, sizeof(attr));
5623  
5624  	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
5625  	memset(&attr, 0, sizeof(attr));
5626  	if (copy_from_bpfptr(&attr, uattr, size) != 0)
5627  		return -EFAULT;
5628  
5629  	err = security_bpf(cmd, &attr, size);
5630  	if (err < 0)
5631  		return err;
5632  
5633  	switch (cmd) {
5634  	case BPF_MAP_CREATE:
5635  		err = map_create(&attr);
5636  		break;
5637  	case BPF_MAP_LOOKUP_ELEM:
5638  		err = map_lookup_elem(&attr);
5639  		break;
5640  	case BPF_MAP_UPDATE_ELEM:
5641  		err = map_update_elem(&attr, uattr);
5642  		break;
5643  	case BPF_MAP_DELETE_ELEM:
5644  		err = map_delete_elem(&attr, uattr);
5645  		break;
5646  	case BPF_MAP_GET_NEXT_KEY:
5647  		err = map_get_next_key(&attr);
5648  		break;
5649  	case BPF_MAP_FREEZE:
5650  		err = map_freeze(&attr);
5651  		break;
5652  	case BPF_PROG_LOAD:
5653  		err = bpf_prog_load(&attr, uattr, size);
5654  		break;
5655  	case BPF_OBJ_PIN:
5656  		err = bpf_obj_pin(&attr);
5657  		break;
5658  	case BPF_OBJ_GET:
5659  		err = bpf_obj_get(&attr);
5660  		break;
5661  	case BPF_PROG_ATTACH:
5662  		err = bpf_prog_attach(&attr);
5663  		break;
5664  	case BPF_PROG_DETACH:
5665  		err = bpf_prog_detach(&attr);
5666  		break;
5667  	case BPF_PROG_QUERY:
5668  		err = bpf_prog_query(&attr, uattr.user);
5669  		break;
5670  	case BPF_PROG_TEST_RUN:
5671  		err = bpf_prog_test_run(&attr, uattr.user);
5672  		break;
5673  	case BPF_PROG_GET_NEXT_ID:
5674  		err = bpf_obj_get_next_id(&attr, uattr.user,
5675  					  &prog_idr, &prog_idr_lock);
5676  		break;
5677  	case BPF_MAP_GET_NEXT_ID:
5678  		err = bpf_obj_get_next_id(&attr, uattr.user,
5679  					  &map_idr, &map_idr_lock);
5680  		break;
5681  	case BPF_BTF_GET_NEXT_ID:
5682  		err = bpf_obj_get_next_id(&attr, uattr.user,
5683  					  &btf_idr, &btf_idr_lock);
5684  		break;
5685  	case BPF_PROG_GET_FD_BY_ID:
5686  		err = bpf_prog_get_fd_by_id(&attr);
5687  		break;
5688  	case BPF_MAP_GET_FD_BY_ID:
5689  		err = bpf_map_get_fd_by_id(&attr);
5690  		break;
5691  	case BPF_OBJ_GET_INFO_BY_FD:
5692  		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5693  		break;
5694  	case BPF_RAW_TRACEPOINT_OPEN:
5695  		err = bpf_raw_tracepoint_open(&attr);
5696  		break;
5697  	case BPF_BTF_LOAD:
5698  		err = bpf_btf_load(&attr, uattr, size);
5699  		break;
5700  	case BPF_BTF_GET_FD_BY_ID:
5701  		err = bpf_btf_get_fd_by_id(&attr);
5702  		break;
5703  	case BPF_TASK_FD_QUERY:
5704  		err = bpf_task_fd_query(&attr, uattr.user);
5705  		break;
5706  	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5707  		err = map_lookup_and_delete_elem(&attr);
5708  		break;
5709  	case BPF_MAP_LOOKUP_BATCH:
5710  		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5711  		break;
5712  	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5713  		err = bpf_map_do_batch(&attr, uattr.user,
5714  				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5715  		break;
5716  	case BPF_MAP_UPDATE_BATCH:
5717  		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5718  		break;
5719  	case BPF_MAP_DELETE_BATCH:
5720  		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5721  		break;
5722  	case BPF_LINK_CREATE:
5723  		err = link_create(&attr, uattr);
5724  		break;
5725  	case BPF_LINK_UPDATE:
5726  		err = link_update(&attr);
5727  		break;
5728  	case BPF_LINK_GET_FD_BY_ID:
5729  		err = bpf_link_get_fd_by_id(&attr);
5730  		break;
5731  	case BPF_LINK_GET_NEXT_ID:
5732  		err = bpf_obj_get_next_id(&attr, uattr.user,
5733  					  &link_idr, &link_idr_lock);
5734  		break;
5735  	case BPF_ENABLE_STATS:
5736  		err = bpf_enable_stats(&attr);
5737  		break;
5738  	case BPF_ITER_CREATE:
5739  		err = bpf_iter_create(&attr);
5740  		break;
5741  	case BPF_LINK_DETACH:
5742  		err = link_detach(&attr);
5743  		break;
5744  	case BPF_PROG_BIND_MAP:
5745  		err = bpf_prog_bind_map(&attr);
5746  		break;
5747  	case BPF_TOKEN_CREATE:
5748  		err = token_create(&attr);
5749  		break;
5750  	default:
5751  		err = -EINVAL;
5752  		break;
5753  	}
5754  
5755  	return err;
5756  }
5757  
5758  SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5759  {
5760  	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5761  }
5762  
5763  static bool syscall_prog_is_valid_access(int off, int size,
5764  					 enum bpf_access_type type,
5765  					 const struct bpf_prog *prog,
5766  					 struct bpf_insn_access_aux *info)
5767  {
5768  	if (off < 0 || off >= U16_MAX)
5769  		return false;
5770  	if (off % size != 0)
5771  		return false;
5772  	return true;
5773  }
5774  
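/*
 * bpf_sys_bpf() helper: allow syscall programs to issue a restricted subset
 * of BPF commands with the attr already in kernel memory.
 */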
5775  BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5776  {
5777  	switch (cmd) {
5778  	case BPF_MAP_CREATE:
5779  	case BPF_MAP_DELETE_ELEM:
5780  	case BPF_MAP_UPDATE_ELEM:
5781  	case BPF_MAP_FREEZE:
5782  	case BPF_MAP_GET_FD_BY_ID:
5783  	case BPF_PROG_LOAD:
5784  	case BPF_BTF_LOAD:
5785  	case BPF_LINK_CREATE:
5786  	case BPF_RAW_TRACEPOINT_OPEN:
5787  		break;
5788  	default:
5789  		return -EINVAL;
5790  	}
5791  	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5792  }
5793  
5794  
5795  /* To shut up -Wmissing-prototypes.
5796   * This function is used by the kernel light skeleton
5797   * to load bpf programs when modules are loaded or during kernel boot.
5798   * See tools/lib/bpf/skel_internal.h
5799   */
5800  int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5801  
5802  int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5803  {
5804  	struct bpf_prog * __maybe_unused prog;
5805  	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5806  
5807  	switch (cmd) {
5808  #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5809  	case BPF_PROG_TEST_RUN:
5810  		if (attr->test.data_in || attr->test.data_out ||
5811  		    attr->test.ctx_out || attr->test.duration ||
5812  		    attr->test.repeat || attr->test.flags)
5813  			return -EINVAL;
5814  
5815  		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5816  		if (IS_ERR(prog))
5817  			return PTR_ERR(prog);
5818  
5819  		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5820  		    attr->test.ctx_size_in > U16_MAX) {
5821  			bpf_prog_put(prog);
5822  			return -EINVAL;
5823  		}
5824  
5825  		run_ctx.bpf_cookie = 0;
5826  		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5827  			/* recursion detected */
5828  			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
5829  			bpf_prog_put(prog);
5830  			return -EBUSY;
5831  		}
5832  		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5833  		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5834  						&run_ctx);
5835  		bpf_prog_put(prog);
5836  		return 0;
5837  #endif
5838  	default:
5839  		return ____bpf_sys_bpf(cmd, attr, size);
5840  	}
5841  }
5842  EXPORT_SYMBOL(kern_sys_bpf);
5843  
5844  static const struct bpf_func_proto bpf_sys_bpf_proto = {
5845  	.func		= bpf_sys_bpf,
5846  	.gpl_only	= false,
5847  	.ret_type	= RET_INTEGER,
5848  	.arg1_type	= ARG_ANYTHING,
5849  	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
5850  	.arg3_type	= ARG_CONST_SIZE,
5851  };
5852  
5853  const struct bpf_func_proto * __weak
5854  tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5855  {
5856  	return bpf_base_func_proto(func_id, prog);
5857  }
5858  
5859  BPF_CALL_1(bpf_sys_close, u32, fd)
5860  {
5861  	/* When bpf program calls this helper there should not be
5862  	 * an fdget() without matching completed fdput().
5863  	 * This helper is allowed in the following callchain only:
5864  	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5865  	 */
5866  	return close_fd(fd);
5867  }
5868  
5869  static const struct bpf_func_proto bpf_sys_close_proto = {
5870  	.func		= bpf_sys_close,
5871  	.gpl_only	= false,
5872  	.ret_type	= RET_INTEGER,
5873  	.arg1_type	= ARG_ANYTHING,
5874  };
5875  
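/*
 * bpf_kallsyms_lookup_name() helper: resolve a NUL-terminated symbol name to
 * its address.  No flags are supported yet, and the caller must be allowed to
 * see raw kernel addresses (bpf_dump_raw_ok()).
 */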
5876  BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5877  {
5878  	*res = 0;
5879  	if (flags)
5880  		return -EINVAL;
5881  
5882  	if (name_sz <= 1 || name[name_sz - 1])
5883  		return -EINVAL;
5884  
5885  	if (!bpf_dump_raw_ok(current_cred()))
5886  		return -EPERM;
5887  
5888  	*res = kallsyms_lookup_name(name);
5889  	return *res ? 0 : -ENOENT;
5890  }
5891  
5892  static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5893  	.func		= bpf_kallsyms_lookup_name,
5894  	.gpl_only	= false,
5895  	.ret_type	= RET_INTEGER,
5896  	.arg1_type	= ARG_PTR_TO_MEM,
5897  	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
5898  	.arg3_type	= ARG_ANYTHING,
5899  	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
5900  	.arg4_size	= sizeof(u64),
5901  };
5902  
5903  static const struct bpf_func_proto *
5904  syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5905  {
5906  	switch (func_id) {
5907  	case BPF_FUNC_sys_bpf:
5908  		return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
5909  		       ? NULL : &bpf_sys_bpf_proto;
5910  	case BPF_FUNC_btf_find_by_name_kind:
5911  		return &bpf_btf_find_by_name_kind_proto;
5912  	case BPF_FUNC_sys_close:
5913  		return &bpf_sys_close_proto;
5914  	case BPF_FUNC_kallsyms_lookup_name:
5915  		return &bpf_kallsyms_lookup_name_proto;
5916  	default:
5917  		return tracing_prog_func_proto(func_id, prog);
5918  	}
5919  }
5920  
5921  const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5922  	.get_func_proto  = syscall_prog_func_proto,
5923  	.is_valid_access = syscall_prog_is_valid_access,
5924  };
5925  
5926  const struct bpf_prog_ops bpf_syscall_prog_ops = {
5927  	.test_run = bpf_prog_test_run_syscall,
5928  };
5929  
5930  #ifdef CONFIG_SYSCTL
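/*
 * sysctl handler for kernel.bpf_stats_enabled: flip the static key on
 * 0 <-> 1 transitions, serialized against the BPF_ENABLE_STATS fd path by
 * bpf_stats_enabled_mutex.
 */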
5931  static int bpf_stats_handler(const struct ctl_table *table, int write,
5932  			     void *buffer, size_t *lenp, loff_t *ppos)
5933  {
5934  	struct static_key *key = (struct static_key *)table->data;
5935  	static int saved_val;
5936  	int val, ret;
5937  	struct ctl_table tmp = {
5938  		.data   = &val,
5939  		.maxlen = sizeof(val),
5940  		.mode   = table->mode,
5941  		.extra1 = SYSCTL_ZERO,
5942  		.extra2 = SYSCTL_ONE,
5943  	};
5944  
5945  	if (write && !capable(CAP_SYS_ADMIN))
5946  		return -EPERM;
5947  
5948  	mutex_lock(&bpf_stats_enabled_mutex);
5949  	val = saved_val;
5950  	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5951  	if (write && !ret && val != saved_val) {
5952  		if (val)
5953  			static_key_slow_inc(key);
5954  		else
5955  			static_key_slow_dec(key);
5956  		saved_val = val;
5957  	}
5958  	mutex_unlock(&bpf_stats_enabled_mutex);
5959  	return ret;
5960  }
5961  
5962  void __weak unpriv_ebpf_notify(int new_state)
5963  {
5964  }
5965  
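/*
 * sysctl handler for kernel.unprivileged_bpf_disabled.  Once the value has
 * been set to 1 it is locked: any write that would change it fails with
 * -EPERM.
 */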
5966  static int bpf_unpriv_handler(const struct ctl_table *table, int write,
5967  			      void *buffer, size_t *lenp, loff_t *ppos)
5968  {
5969  	int ret, unpriv_enable = *(int *)table->data;
5970  	bool locked_state = unpriv_enable == 1;
5971  	struct ctl_table tmp = *table;
5972  
5973  	if (write && !capable(CAP_SYS_ADMIN))
5974  		return -EPERM;
5975  
5976  	tmp.data = &unpriv_enable;
5977  	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5978  	if (write && !ret) {
5979  		if (locked_state && unpriv_enable != 1)
5980  			return -EPERM;
5981  		*(int *)table->data = unpriv_enable;
5982  	}
5983  
5984  	if (write)
5985  		unpriv_ebpf_notify(unpriv_enable);
5986  
5987  	return ret;
5988  }
5989  
5990  static struct ctl_table bpf_syscall_table[] = {
5991  	{
5992  		.procname	= "unprivileged_bpf_disabled",
5993  		.data		= &sysctl_unprivileged_bpf_disabled,
5994  		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
5995  		.mode		= 0644,
5996  		.proc_handler	= bpf_unpriv_handler,
5997  		.extra1		= SYSCTL_ZERO,
5998  		.extra2		= SYSCTL_TWO,
5999  	},
6000  	{
6001  		.procname	= "bpf_stats_enabled",
6002  		.data		= &bpf_stats_enabled_key.key,
6003  		.mode		= 0644,
6004  		.proc_handler	= bpf_stats_handler,
6005  	},
6006  };
6007  
6008  static int __init bpf_syscall_sysctl_init(void)
6009  {
6010  	register_sysctl_init("kernel", bpf_syscall_table);
6011  	return 0;
6012  }
6013  late_initcall(bpf_syscall_sysctl_init);
6014  #endif /* CONFIG_SYSCTL */
6015