Lines Matching +full:self +full:- +full:test
1 // SPDX-License-Identifier: GPL-2.0-only
92 /* If specified, test engine looks for this sequence of
94 * test rewrites applied by verifier. Use values
96 * fields if content does not matter. The test case fails if
99 * The sequence can be split into sub-sequences by adding a
100 * SKIP_INSNS instruction at the end of each sub-sequence. In
101 * that case the sub-sequences are searched for one after another.
104 /* If specified, test engine applies same pattern matching
106 * matched test case is marked as failed.
134 * Can be a tab-separated sequence of expected strings. An empty string
149 void (*fill_helper)(struct bpf_test *self);
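The comment fragments above describe how a test entry can pin down the instructions the verifier is expected to emit and the strings expected in its log. A minimal, hypothetical sketch of such an entry follows; it is not standalone and assumes the harness macros referenced elsewhere in this listing (SKIP_INSNS(), INSN_OFF_MASK, INSN_IMM_MASK, plus the usual BPF_* insn macros), with a purely illustrative program body.

        {
                "sketch: expected_insns with masked fields",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 42),
                        BPF_EXIT_INSN(),
                },
                .expected_insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 42),   /* first sub-sequence, matched verbatim */
                        SKIP_INSNS(),                   /* any gap allowed here */
                        /* masked entry: opcode must match, off/imm are ignored */
                        { .code = BPF_JMP | BPF_EXIT, .off = INSN_OFF_MASK, .imm = INSN_IMM_MASK },
                },
                .result = ACCEPT,
        },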
190 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) in bpf_fill_ld_abs_vlan_push_pop() argument
192 /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */ in bpf_fill_ld_abs_vlan_push_pop()
195 unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6; in bpf_fill_ld_abs_vlan_push_pop()
196 struct bpf_insn *insn = self->fill_insns; in bpf_fill_ld_abs_vlan_push_pop()
204 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); in bpf_fill_ld_abs_vlan_push_pop()
211 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); in bpf_fill_ld_abs_vlan_push_pop()
217 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); in bpf_fill_ld_abs_vlan_push_pop()
222 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); in bpf_fill_ld_abs_vlan_push_pop()
228 for (; i < len - 3; i++) in bpf_fill_ld_abs_vlan_push_pop()
230 insn[len - 3] = BPF_JMP_A(1); in bpf_fill_ld_abs_vlan_push_pop()
232 insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0); in bpf_fill_ld_abs_vlan_push_pop()
233 insn[len - 1] = BPF_EXIT_INSN(); in bpf_fill_ld_abs_vlan_push_pop()
234 self->prog_len = len; in bpf_fill_ld_abs_vlan_push_pop()
237 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self) in bpf_fill_jump_around_ld_abs() argument
239 struct bpf_insn *insn = self->fill_insns; in bpf_fill_jump_around_ld_abs()
251 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2); in bpf_fill_jump_around_ld_abs()
253 while (i < len - 1) in bpf_fill_jump_around_ld_abs()
256 self->prog_len = i + 1; in bpf_fill_jump_around_ld_abs()
259 static void bpf_fill_rand_ld_dw(struct bpf_test *self) in bpf_fill_rand_ld_dw() argument
261 struct bpf_insn *insn = self->fill_insns; in bpf_fill_rand_ld_dw()
266 while (i < self->retval) { in bpf_fill_rand_ld_dw()
279 self->prog_len = i + 1; in bpf_fill_rand_ld_dw()
281 self->retval = (uint32_t)res; in bpf_fill_rand_ld_dw()
286 /* test the sequence of 8k jumps */
287 static void bpf_fill_scale1(struct bpf_test *self) in bpf_fill_scale1() argument
289 struct bpf_insn *insn = self->fill_insns; in bpf_fill_scale1()
293 /* test to check that the long sequence of jumps is acceptable */ in bpf_fill_scale1()
300 -8 * (k % 64 + 1)); in bpf_fill_scale1()
305 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4) in bpf_fill_scale1()
308 self->prog_len = i + 1; in bpf_fill_scale1()
309 self->retval = 42; in bpf_fill_scale1()
312 /* test the sequence of 8k jumps in the innermost function (function depth 8) */
313 static void bpf_fill_scale2(struct bpf_test *self) in bpf_fill_scale2() argument
315 struct bpf_insn *insn = self->fill_insns; in bpf_fill_scale2()
324 /* test to check that the long sequence of jumps is acceptable */ in bpf_fill_scale2()
332 -8 * (k % (64 - 4 * FUNC_NEST) + 1)); in bpf_fill_scale2()
334 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4) in bpf_fill_scale2()
337 self->prog_len = i + 1; in bpf_fill_scale2()
338 self->retval = 42; in bpf_fill_scale2()
341 static void bpf_fill_scale(struct bpf_test *self) in bpf_fill_scale() argument
343 switch (self->retval) { in bpf_fill_scale()
345 return bpf_fill_scale1(self); in bpf_fill_scale()
347 return bpf_fill_scale2(self); in bpf_fill_scale()
349 self->prog_len = 0; in bpf_fill_scale()
362 insn[i + hlen] = BPF_JMP_A(hlen - i); in bpf_fill_torturous_jumps_insn_1()
364 insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1); in bpf_fill_torturous_jumps_insn_1()
365 insn[len - 1] = BPF_EXIT_INSN(); in bpf_fill_torturous_jumps_insn_1()
382 insn[i + j] = BPF_JMP_A(16 - j - 1); in bpf_fill_torturous_jumps_insn_2()
386 insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2); in bpf_fill_torturous_jumps_insn_2()
387 insn[len - 1] = BPF_EXIT_INSN(); in bpf_fill_torturous_jumps_insn_2()
392 static void bpf_fill_torturous_jumps(struct bpf_test *self) in bpf_fill_torturous_jumps() argument
394 struct bpf_insn *insn = self->fill_insns; in bpf_fill_torturous_jumps()
397 switch (self->retval) { in bpf_fill_torturous_jumps()
399 self->prog_len = bpf_fill_torturous_jumps_insn_1(insn); in bpf_fill_torturous_jumps()
402 self->prog_len = bpf_fill_torturous_jumps_insn_2(insn); in bpf_fill_torturous_jumps()
408 insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0); in bpf_fill_torturous_jumps()
418 self->prog_len = i; in bpf_fill_torturous_jumps()
421 self->prog_len = 0; in bpf_fill_torturous_jumps()
426 static void bpf_fill_big_prog_with_loop_1(struct bpf_test *self) in bpf_fill_big_prog_with_loop_1() argument
428 struct bpf_insn *insn = self->fill_insns; in bpf_fill_big_prog_with_loop_1()
429 /* This test was added to catch a specific use after free in bpf_fill_big_prog_with_loop_1()
436 const int len = getpagesize() - 25; in bpf_fill_big_prog_with_loop_1()
451 while (i < len - 3) in bpf_fill_big_prog_with_loop_1()
459 insn[callback_load_idx].imm = callback_idx - callback_load_idx - 1; in bpf_fill_big_prog_with_loop_1()
460 self->func_info[1].insn_off = callback_idx; in bpf_fill_big_prog_with_loop_1()
461 self->prog_len = i; in bpf_fill_big_prog_with_loop_1()
469 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
471 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
472 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
473 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
474 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
499 * positive u32, and zero-extend it into 64-bit.
509 * negative u32, and sign-extend it into 64-bit.
529 for (len = MAX_INSNS - 1; len > 0; --len) in probe_filter_length()
556 return -1; in __create_map()
613 return -1; in create_prog_array()
615 return -1; in create_prog_array()
630 mfd = -1; in create_prog_array()
647 return -1; in create_map_in_map()
657 return -1; in create_map_in_map()
677 return -1; in create_cgroup_storage()
774 btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, &opts); in load_btf_spec()
780 return btf_fd < 0 ? -1 : btf_fd; in load_btf_spec()
789 static int load_btf_for_test(struct bpf_test *test) in load_btf_for_test() argument
794 test->btf_types[types_num] != BTF_END_RAW) in load_btf_for_test()
797 int types_len = types_num * sizeof(test->btf_types[0]); in load_btf_for_test()
799 return load_btf_spec(test->btf_types, types_len, in load_btf_for_test()
800 test->btf_strings, sizeof(test->btf_strings)); in load_btf_for_test()
813 return -1; in create_map_spin_lock()
832 return -1; in create_sk_storage_map()
851 return -1; in create_map_timer()
870 return -1; in create_map_kptr()
976 while (fixup_kfunc_btf_id->kfunc) { in fixup_prog_kfuncs()
983 fixup_kfunc_btf_id->kfunc, in fixup_prog_kfuncs()
993 fixup_kfunc_btf_id->kfunc, in fixup_prog_kfuncs()
1001 prog[fixup_kfunc_btf_id->insn_idx].off = 1; in fixup_prog_kfuncs()
1006 prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id; in fixup_prog_kfuncs()
1011 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, in do_test_fixup() argument
1014 int *fixup_map_hash_8b = test->fixup_map_hash_8b; in do_test_fixup()
1015 int *fixup_map_hash_48b = test->fixup_map_hash_48b; in do_test_fixup()
1016 int *fixup_map_hash_16b = test->fixup_map_hash_16b; in do_test_fixup()
1017 int *fixup_map_array_48b = test->fixup_map_array_48b; in do_test_fixup()
1018 int *fixup_map_sockmap = test->fixup_map_sockmap; in do_test_fixup()
1019 int *fixup_map_sockhash = test->fixup_map_sockhash; in do_test_fixup()
1020 int *fixup_map_xskmap = test->fixup_map_xskmap; in do_test_fixup()
1021 int *fixup_map_stacktrace = test->fixup_map_stacktrace; in do_test_fixup()
1022 int *fixup_prog1 = test->fixup_prog1; in do_test_fixup()
1023 int *fixup_prog2 = test->fixup_prog2; in do_test_fixup()
1024 int *fixup_map_in_map = test->fixup_map_in_map; in do_test_fixup()
1025 int *fixup_cgroup_storage = test->fixup_cgroup_storage; in do_test_fixup()
1026 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage; in do_test_fixup()
1027 int *fixup_map_spin_lock = test->fixup_map_spin_lock; in do_test_fixup()
1028 int *fixup_map_array_ro = test->fixup_map_array_ro; in do_test_fixup()
1029 int *fixup_map_array_wo = test->fixup_map_array_wo; in do_test_fixup()
1030 int *fixup_map_array_small = test->fixup_map_array_small; in do_test_fixup()
1031 int *fixup_sk_storage_map = test->fixup_sk_storage_map; in do_test_fixup()
1032 int *fixup_map_event_output = test->fixup_map_event_output; in do_test_fixup()
1033 int *fixup_map_reuseport_array = test->fixup_map_reuseport_array; in do_test_fixup()
1034 int *fixup_map_ringbuf = test->fixup_map_ringbuf; in do_test_fixup()
1035 int *fixup_map_timer = test->fixup_map_timer; in do_test_fixup()
1036 int *fixup_map_kptr = test->fixup_map_kptr; in do_test_fixup()
1038 if (test->fill_helper) { in do_test_fixup()
1039 test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn)); in do_test_fixup()
1040 test->fill_helper(test); in do_test_fixup()
1043 /* Allocating HTs with 1 elem is fine here, since we only test in do_test_fixup()
1237 fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id); in do_test_fixup()
1305 /* Returns true if every part of exp (tab-separated) appears in log, in order.
1322 len = p - exp; in cmp_str_seq()
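For the tab-separated matching described above, here is a self-contained sketch of the same idea using only libc; the helper name str_seq_in_order() is hypothetical and not part of the harness, which does this in cmp_str_seq().

        #include <stdbool.h>
        #include <string.h>

        /* Every tab-separated part of `exp` must appear in `log`, in order. */
        static bool str_seq_in_order(const char *log, const char *exp)
        {
                while (*exp) {
                        const char *tab = strchr(exp, '\t');
                        size_t len = tab ? (size_t)(tab - exp) : strlen(exp);
                        const char *p;

                        for (p = log; *p; p++)
                                if (!strncmp(p, exp, len))
                                        break;
                        if (!*p)
                                return false;           /* this part never shows up */
                        log = p + len;                  /* later parts must follow it */
                        exp += len + (tab ? 1 : 0);     /* step past the part and its tab */
                }
                return true;
        }

        int main(void)
        {
                const char *log = "processed 12 insns\nverification time 20 usec";

                return str_seq_in_order(log, "processed\tverification") ? 0 : 1;
        }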
1371 if (masked->imm == INSN_IMM_MASK) in compare_masked_insn()
1373 if (masked->off == INSN_OFF_MASK) in compare_masked_insn()
1385 return -1; in find_insn_subseq()
1387 for (i = 0; i < seq_len - subseq_len + 1; ++i) { in find_insn_subseq()
1400 return -1; in find_insn_subseq()
1411 return -1; in find_skip_insn_marker()
1414 /* Return true if all sub-sequences in `subseqs` could be found in
1415 * `seq` one after another. Sub-sequences are separated by a single
1432 seq_len -= subseq_idx + cur_subseq_len; in find_all_insn_subseqs()
1434 subseqs_len -= cur_subseq_len + 1; in find_all_insn_subseqs()
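The fragments above outline how the matcher walks the pattern sub-sequence by sub-sequence. A self-contained sketch of the same walk over plain integers, with -1 standing in for the SKIP_INSNS separator (function names hypothetical), could look like this:

        #include <stdbool.h>
        #include <stdio.h>

        /* Find `sub` (length sub_len) inside `seq` (length seq_len); -1 if absent. */
        static int find_subseq(const int *seq, int seq_len, const int *sub, int sub_len)
        {
                int i, j;

                for (i = 0; i + sub_len <= seq_len; i++) {
                        for (j = 0; j < sub_len && seq[i + j] == sub[j]; j++)
                                ;
                        if (j == sub_len)
                                return i;
                }
                return -1;
        }

        /* Every sub-sequence of `pat` (separated by -1) must be found in `seq`,
         * one after another, each search starting where the previous match ended.
         */
        static bool find_all_subseqs(const int *seq, int seq_len, const int *pat, int pat_len)
        {
                while (pat_len > 0) {
                        int cur = 0, at;

                        while (cur < pat_len && pat[cur] != -1)
                                cur++;
                        at = find_subseq(seq, seq_len, pat, cur);
                        if (at < 0)
                                return false;
                        seq += at + cur;
                        seq_len -= at + cur;
                        pat += cur + (cur < pat_len ? 1 : 0);   /* skip the -1 separator */
                        pat_len -= cur + (cur < pat_len ? 1 : 0);
                }
                return true;
        }

        int main(void)
        {
                int seq[] = { 1, 2, 3, 4, 5, 6 };
                int pat[] = { 2, 3, -1, 5 };    /* "2 3", then later "5" */

                printf("%s\n", find_all_subseqs(seq, 6, pat, 4) ? "match" : "no match");
                return 0;
        }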
1455 i, insn->code, insn->dst_reg, in print_insn()
1456 insn->src_reg, insn->off, insn->imm); in print_insn()
1460 static bool check_xlated_program(struct bpf_test *test, int fd_prog) in check_xlated_program() argument
1465 bool check_expected = !is_null_insn(test->expected_insns); in check_xlated_program()
1466 bool check_unexpected = !is_null_insn(test->unexpected_insns); in check_xlated_program()
1478 !find_all_insn_subseqs(buf, test->expected_insns, in check_xlated_program()
1486 print_insn(test->expected_insns, MAX_EXPECTED_INSNS); in check_xlated_program()
1491 find_all_insn_subseqs(buf, test->unexpected_insns, in check_xlated_program()
1498 printf("Un-expected subsequence:\n"); in check_xlated_program()
1499 print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS); in check_xlated_program()
1508 static void do_test_single(struct bpf_test *test, bool unpriv, in do_test_single() argument
1512 int prog_len, prog_type = test->prog_type; in do_test_single()
1513 struct bpf_insn *prog = test->insns; in do_test_single()
1518 int fd_array[2] = { -1, -1 }; in do_test_single()
1524 if ((test->flags & F_NEEDS_JIT_ENABLED) && jit_disabled) { in do_test_single()
1531 fd_prog = -1; in do_test_single()
1533 map_fds[i] = -1; in do_test_single()
1534 btf_fd = -1; in do_test_single()
1539 do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]); in do_test_single()
1540 if (test->fill_insns) { in do_test_single()
1541 prog = test->fill_insns; in do_test_single()
1542 prog_len = test->prog_len; in do_test_single()
1547 * features, skip this test. in do_test_single()
1553 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT) in do_test_single()
1555 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) in do_test_single()
1557 if (test->flags & ~3) in do_test_single()
1558 pflags |= test->flags; in do_test_single()
1560 expected_ret = unpriv && test->result_unpriv != UNDEF ? in do_test_single()
1561 test->result_unpriv : test->result; in do_test_single()
1562 expected_err = unpriv && test->errstr_unpriv ? in do_test_single()
1563 test->errstr_unpriv : test->errstr; in do_test_single()
1565 opts.expected_attach_type = test->expected_attach_type; in do_test_single()
1573 if (fd_array[1] != -1) in do_test_single()
1577 prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) { in do_test_single()
1580 attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc, in do_test_single()
1584 test->kfunc); in do_test_single()
1592 if (test->btf_types[0] != 0) { in do_test_single()
1593 btf_fd = load_btf_for_test(test); in do_test_single()
1599 if (test->func_info_cnt != 0) { in do_test_single()
1600 opts.func_info = test->func_info; in do_test_single()
1601 opts.func_info_cnt = test->func_info_cnt; in do_test_single()
1602 opts.func_info_rec_size = sizeof(test->func_info[0]); in do_test_single()
1636 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) in do_test_single()
1654 if (!unpriv && test->insn_processed) { in do_test_single()
1660 if (test->insn_processed != insn_processed) { in do_test_single()
1662 insn_processed, test->insn_processed); in do_test_single()
1670 if (!check_xlated_program(test, fd_prog)) in do_test_single()
1675 if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) { in do_test_single()
1679 if (!test->runs) in do_test_single()
1680 test->runs = 1; in do_test_single()
1682 for (i = 0; i < test->runs; i++) { in do_test_single()
1683 if (unpriv && test->retvals[i].retval_unpriv) in do_test_single()
1684 expected_val = test->retvals[i].retval_unpriv; in do_test_single()
1686 expected_val = test->retvals[i].retval; in do_test_single()
1689 test->retvals[i].data, in do_test_single()
1690 sizeof(test->retvals[i].data)); in do_test_single()
1692 printf("(run %d/%d) ", i + 1, test->runs); in do_test_single()
1713 if (test->fill_insns) in do_test_single()
1714 free(test->fill_insns); in do_test_single()
1731 /* The test checks for finer cap as CAP_NET_ADMIN, in is_admin()
1743 static bool test_as_unpriv(struct bpf_test *test) in test_as_unpriv() argument
1755 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) in test_as_unpriv()
1758 return !test->prog_type || in test_as_unpriv()
1759 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER || in test_as_unpriv()
1760 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB; in test_as_unpriv()
1774 struct bpf_test *test = &tests[i]; in do_test() local
1776 /* Program types that are not supported by non-root we in do_test()
1779 if (test_as_unpriv(test) && unpriv_disabled) { in do_test()
1780 printf("#%d/u %s SKIP\n", i, test->descr); in do_test()
1782 } else if (test_as_unpriv(test)) { in do_test()
1785 printf("#%d/u %s ", i, test->descr); in do_test()
1786 do_test_single(test, true, &passes, &errors); in do_test()
1792 printf("#%d/p %s SKIP\n", i, test->descr); in do_test()
1795 printf("#%d/p %s ", i, test->descr); in do_test()
1796 do_test_single(test, false, &passes, &errors); in do_test()
1814 if (argc > 1 && strcmp(argv[1], "-v") == 0) { in main()
1818 argc--; in main()
1820 if (argc > 1 && strcmp(argv[1], "-vv") == 0) { in main()
1824 argc--; in main()