// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/kernel.h>
#include <linux/filter.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
#include "str_error.h"

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

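/* Close a probe's result FD (if valid) and translate it into a boolean
 * outcome: 1 if the kernel accepted the command, 0 otherwise.
 */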
int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

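/* Check that BPF_PROG_LOAD accepts a non-empty prog_name attribute by
 * loading a trivial program named "libbpf_nametest".
 */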
static int probe_kern_prog_name(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

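/* Check support for direct map value access (BPF_PSEUDO_MAP_VALUE ld_imm64),
 * which backs the "global variables" feature: create a small ARRAY map and
 * load a program that stores directly into its value.
 */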
static int probe_kern_global_data(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	close(map);
	return probe_fd(ret);
}

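/* Check basic BTF support by loading a minimal raw BTF blob containing a
 * single 'int' type.
 */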
static int probe_kern_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

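/* Check that the kernel's BTF validator accepts BTF_KIND_FUNC and
 * BTF_KIND_FUNC_PROTO types.
 */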
static int probe_kern_btf_func(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */			       /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */				       /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

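/* Check that BTF_KIND_FUNC with BTF_FUNC_GLOBAL linkage is accepted. */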
static int probe_kern_btf_func_global(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* static void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */			       /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */		       /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

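/* Check that BTF_KIND_VAR and BTF_KIND_DATASEC types are accepted. */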
static int probe_kern_btf_datasec(int token_fd)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */				       /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */			       /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

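/* Check that DATASEC names starting with '?' are accepted by the kernel. */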
static int probe_kern_btf_qmark_datasec(int token_fd)
{
	static const char strs[] = "\0x\0?.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */				       /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC ?.data */			       /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

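/* Check BTF_KIND_FLOAT support. */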
static int probe_kern_btf_float(int token_fd)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

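/* Check BTF_KIND_DECL_TAG support. */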
static int probe_kern_btf_decl_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */				       /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* attr */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

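/* Check BTF_KIND_TYPE_TAG support. */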
static int probe_kern_btf_type_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	       /* [1] */
		/* attr */
		BTF_TYPE_TYPE_TAG_ENC(1, 1),			       /* [2] */
		/* ptr */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

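/* Check that ARRAY maps can be created with the BPF_F_MMAPABLE flag. */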
static int probe_kern_array_mmap(int token_fd)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
		.token_fd = token_fd,
	);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
	return probe_fd(fd);
}

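/* Check that BPF_PROG_LOAD understands the expected_attach_type attribute. */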
static int probe_kern_exp_attach_type(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
	 * to see if kernel supports expected_attach_type field for
	 * BPF_PROG_LOAD command
	 */
	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

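/* Check availability of the bpf_probe_read_kernel() helper by loading a
 * trivial tracepoint program that calls it.
 */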
static int probe_kern_probe_read_kernel(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

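/* Check support for the BPF_PROG_BIND_MAP command by binding a throwaway
 * ARRAY map to a trivial socket filter program.
 */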
static int probe_prog_bind_map(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

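/* Check for kernel module BTF support; see the comment inside on how the
 * name/name_len fields of struct bpf_btf_info are used as a proxy.
 */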
static int probe_module_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info.
	 */
	err = bpf_btf_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

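/* Check support for BPF_LINK_CREATE with the BPF_PERF_EVENT attach type
 * (perf-event-based BPF links).
 */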
static int probe_perf_link(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, link_fd, err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &opts);
	if (prog_fd < 0)
		return -errno;

	/* use invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

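/* Check support for multi-uprobe BPF links (BPF_TRACE_UPROBE_MULTI),
 * including a working (process-level) PID filtering implementation.
 */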
static int probe_uprobe_multi_link(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;
	unsigned long offset = 0;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &load_opts);
	if (prog_fd < 0)
		return -errno;

	/* Creating uprobe in '/' binary should fail with -EBADF. */
	link_opts.uprobe_multi.path = "/";
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0 || err != -EBADF) {
		if (link_fd >= 0)
			close(link_fd);
		close(prog_fd);
		return 0;
	}

	/* Initial multi-uprobe support in the kernel didn't handle PID
	 * filtering correctly (it was doing thread filtering, not process
	 * filtering). So now we'll detect whether PID filtering logic was
	 * fixed, and, if not, we'll pretend multi-uprobes are not supported.
	 * Multi-uprobes are used in USDT attachment logic, and we need to be
	 * conservative here, because multi-uprobe selection happens early at
	 * load time, while the use of PID filtering is only known late at
	 * attachment time, at which point it's too late to undo multi-uprobe
	 * selection.
	 *
	 * Creating a uprobe with pid == -1 for the (invalid) '/' binary will
	 * fail early with -EINVAL on kernels with fixed PID filtering logic;
	 * otherwise -ESRCH would be returned if a correct binary path were
	 * passed (but we'll just get -EBADF, of course).
	 */
	link_opts.uprobe_multi.pid = -1; /* invalid PID */
	link_opts.uprobe_multi.path = "/"; /* invalid path */
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EINVAL;
}

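/* Check availability of the bpf_get_attach_cookie() helper (BPF cookies). */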
static int probe_kern_bpf_cookie(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, insn_cnt = ARRAY_SIZE(insns);

	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(ret);
}

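/* Check BTF_KIND_ENUM64 support. */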
static int probe_kern_btf_enum64(int token_fd)
{
	static const char strs[] = "\0enum64";
	__u32 types[] = {
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

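/* Check whether the kernel itself recognizes the "arg:ctx" decl tag
 * (__arg_ctx) on global subprog arguments, by loading a program whose
 * global subprog needs PTR_TO_CTX for its tagged argument.
 */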
static int probe_kern_arg_ctx_tag(int token_fd)
{
	static const char strs[] = "\0a\0b\0arg:ctx\0";
	const __u32 types[] = {
		/* [1] INT */
		BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
		/* [2] PTR -> VOID */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
		/* [3] FUNC_PROTO `int(void *a)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(1 /* "a" */, 2),
		/* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
		BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
		/* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(3 /* "b" */, 2),
		/* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
		BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
		/* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
		BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
	};
	const struct bpf_insn insns[] = {
		/* main prog */
		BPF_CALL_REL(+1),
		BPF_EXIT_INSN(),
		/* global subprog */
		BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
		BPF_EXIT_INSN(),
	};
	const struct bpf_func_info_min func_infos[] = {
		{ 0, 4 }, /* main prog -> FUNC 'a' */
		{ 2, 6 }, /* subprog -> FUNC 'b' */
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (btf_fd < 0)
		return 0;

	opts.prog_btf_fd = btf_fd;
	opts.func_info = &func_infos;
	opts.func_info_cnt = ARRAY_SIZE(func_infos);
	opts.func_info_rec_size = sizeof(func_infos[0]);

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
				"GPL", insns, insn_cnt, &opts);
	close(btf_fd);

	return probe_fd(prog_fd);
}

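/* Table of feature probes, indexed by enum kern_feature_id. Results are
 * memoized in a struct kern_feature_cache; the static feature_cache below
 * is used when no custom cache is provided.
 */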
typedef int (*feature_probe_fn)(int /* token_fd */);

static struct kern_feature_cache feature_cache;

static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
	[FEAT_BTF_TYPE_TAG] = {
		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
	},
	[FEAT_MEMCG_ACCOUNT] = {
		"memcg-based memory accounting", probe_memcg_account,
	},
	[FEAT_BPF_COOKIE] = {
		"BPF cookie support", probe_kern_bpf_cookie,
	},
	[FEAT_BTF_ENUM64] = {
		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
	},
	[FEAT_SYSCALL_WRAPPER] = {
		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
	},
	[FEAT_UPROBE_MULTI_LINK] = {
		"BPF multi-uprobe link support", probe_uprobe_multi_link,
	},
	[FEAT_ARG_CTX_TAG] = {
		"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
	},
	[FEAT_BTF_QMARK_DATASEC] = {
		"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
	},
};

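/* Lazily run (and cache) the probe for @feat_id; returns true if the running
 * kernel supports the feature. A probe error is logged and treated as
 * "missing".
 */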
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	/* assume global feature cache, unless custom one is provided */
	if (!cache)
		cache = &feature_cache;

	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
		ret = feat->probe(cache->token_fd);
		if (ret > 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		}
	}

	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
}