Selected lines from kernel/bpf/verifier.c

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 #include <linux/bpf-cgroup.h>
23 #include <linux/error-injection.h>
54 * The first pass is depth-first-search to check that the program is a DAG.
56 * - larger than BPF_MAXINSNS insns
57 * - if loop is present (detected via back-edge)
58 * - unreachable insns exist (shouldn't be a forest. program = one function)
59 * - out of bounds or malformed jumps
71 * All registers are 64-bit.
72 * R0 - return register
73 * R1-R5 argument passing registers
74 * R6-R9 callee saved registers
75 * R10 - frame pointer read-only
82 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
87 * (and -20 constant is saved for further stack bounds checking).
127 * [key, key + map->key_size) bytes are valid and were initialized on
133 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
137 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
138 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
140 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
145 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
153 * After the call R0 is set to return type of the function and registers R1-R5
159 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
165 * passes through a NULL-check conditional. For the branch wherein the state is
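/*
 * A minimal sketch (not part of verifier.c) of the lookup-and-check pattern the
 * header comment above walks through, written with the insn macros from
 * include/linux/filter.h. The map fd (3), the 4-byte key and the 8-byte store
 * into the value are placeholders chosen for illustration.
 */
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),              /* init 4-byte key at fp-4        */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),               /* R2 = fp, type PTR_TO_STACK     */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),               /* R2 = fp-4, inside stack bounds */
	BPF_LD_MAP_FD(BPF_REG_1, 3),                         /* R1 becomes CONST_PTR_TO_MAP    */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	                                                     /* R0: PTR_TO_MAP_VALUE_OR_NULL   */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),               /* NULL check; fall-through branch
	                                                      * sees R0 as PTR_TO_MAP_VALUE    */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),                /* store through the value pointer */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),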
209 return aux->map_ptr_state.poison; in bpf_map_ptr_poisoned()
214 return aux->map_ptr_state.unpriv; in bpf_map_ptr_unpriv()
222 aux->map_ptr_state.unpriv = unpriv; in bpf_map_ptr_store()
223 aux->map_ptr_state.poison = poison; in bpf_map_ptr_store()
224 aux->map_ptr_state.map_ptr = map; in bpf_map_ptr_store()
229 return aux->map_key_state & BPF_MAP_KEY_POISON; in bpf_map_key_poisoned()
234 return !(aux->map_key_state & BPF_MAP_KEY_SEEN); in bpf_map_key_unseen()
239 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); in bpf_map_key_immediate()
246 aux->map_key_state = state | BPF_MAP_KEY_SEEN | in bpf_map_key_store()
252 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_helper_call()
253 insn->src_reg == 0; in bpf_helper_call()
258 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_call()
259 insn->src_reg == BPF_PSEUDO_CALL; in bpf_pseudo_call()
264 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_kfunc_call()
265 insn->src_reg == BPF_PSEUDO_KFUNC_CALL; in bpf_pseudo_kfunc_call()
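/*
 * Illustrative sketch (not from verifier.c): the three call flavours the
 * helpers above distinguish differ only in src_reg and imm. The subprog
 * offset (1) and the kfunc BTF id (0) below are placeholders.
 */
	/* helper call: src_reg == 0, imm is the helper id */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	/* bpf-to-bpf call: src_reg == BPF_PSEUDO_CALL, imm is an insn offset,
	 * resolved as i + imm + 1 (see add_subprog_and_kfunc() further down) */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 1),
	/* kfunc call: src_reg == BPF_PSEUDO_KFUNC_CALL, imm is a BTF id */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, /* btf_id */ 0),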
308 /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
309 * generally to pass info about user-defined local kptr types to later
348 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); in btf_type_name()
359 if (!bpf_verifier_log_needed(&env->log)) in verbose()
363 bpf_verifier_vlog(&env->log, fmt, args); in verbose()
375 if (reg->smin_value > S64_MIN) { in verbose_invalid_scalar()
376 verbose(env, " smin=%lld", reg->smin_value); in verbose_invalid_scalar()
379 if (reg->smax_value < S64_MAX) { in verbose_invalid_scalar()
380 verbose(env, " smax=%lld", reg->smax_value); in verbose_invalid_scalar()
392 type = reg->type; in reg_not_null()
411 if (reg->type == PTR_TO_MAP_VALUE) { in reg_btf_record()
412 rec = reg->map_ptr->record; in reg_btf_record()
413 } else if (type_is_ptr_alloc_obj(reg->type)) { in reg_btf_record()
414 meta = btf_find_struct_meta(reg->btf, reg->btf_id); in reg_btf_record()
416 rec = meta->record; in reg_btf_record()
423 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; in subprog_is_global()
432 if (!env->prog->aux->func_info) in subprog_name()
435 info = &env->prog->aux->func_info[subprog]; in subprog_name()
436 return btf_type_name(env->prog->aux->btf, info->type_id); in subprog_name()
443 info->is_cb = true; in mark_subprog_exc_cb()
444 info->is_async_cb = true; in mark_subprog_exc_cb()
445 info->is_exception_cb = true; in mark_subprog_exc_cb()
450 return subprog_info(env, subprog)->is_exception_cb; in subprog_is_exc_cb()
466 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; in is_acquire_function()
528 return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) || in is_sync_callback_calling_insn()
529 (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm)); in is_sync_callback_calling_insn()
534 return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) || in is_async_callback_calling_insn()
535 (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm)); in is_async_callback_calling_insn()
540 return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO; in is_may_goto_insn()
545 return is_may_goto_insn(&env->prog->insnsi[insn_idx]); in is_may_goto_insn_at()
573 return BPF_CLASS(insn->code) == BPF_STX && in is_cmpxchg_insn()
574 BPF_MODE(insn->code) == BPF_ATOMIC && in is_cmpxchg_insn()
575 insn->imm == BPF_CMPXCHG; in is_cmpxchg_insn()
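/*
 * Illustrative sketch (not from verifier.c): the instruction shape matched by
 * is_cmpxchg_insn() above, emitted with the BPF_ATOMIC_OP() macro from
 * include/linux/filter.h. R0 implicitly holds the expected old value.
 */
	BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
	/* .code == BPF_STX | BPF_DW | BPF_ATOMIC, .imm == BPF_CMPXCHG */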
580 return (-off - 1) / BPF_REG_SIZE; in __get_spi()
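/*
 * Worked example for __get_spi() above: stack offsets are negative and each
 * slot is BPF_REG_SIZE (8) bytes, so
 *   off =  -8  ->  ( 8 - 1) / 8 = 0   (slot adjacent to the frame pointer)
 *   off = -16  ->  (16 - 1) / 8 = 1
 *   off = -12  ->  (12 - 1) / 8 = 1   (an offset inside a slot maps to that slot)
 */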
586 struct bpf_verifier_state *cur = env->cur_state; in func()
588 return cur->frame[reg->frameno]; in func()
593 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
595 /* We need to check that slots between [spi - nr_slots + 1, spi] are in is_spi_bounds_valid()
600 * spi and the second slot will be at spi - 1. in is_spi_bounds_valid()
602 return spi - nr_slots + 1 >= 0 && spi < allocated_slots; in is_spi_bounds_valid()
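/*
 * Worked example for is_spi_bounds_valid() above: a dynptr occupies two slots,
 * so with spi = 1, nr_slots = 2 and allocated_stack = 16 (2 slots) the check is
 * 1 - 2 + 1 = 0 >= 0 and 1 < 2, i.e. slots [0, 1] both lie in allocated stack.
 */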
610 if (!tnum_is_const(reg->var_off)) { in stack_slot_obj_get_spi()
612 return -EINVAL; in stack_slot_obj_get_spi()
615 off = reg->off + reg->var_off.value; in stack_slot_obj_get_spi()
618 return -EINVAL; in stack_slot_obj_get_spi()
624 return -EINVAL; in stack_slot_obj_get_spi()
628 return -ERANGE; in stack_slot_obj_get_spi()
691 int id = ++env->id_gen; in mark_dynptr_stack_regs()
701 __mark_dynptr_reg(reg, type, true, ++env->id_gen); in mark_dynptr_cb_reg()
718 /* We cannot assume both spi and spi - 1 belong to the same dynptr, in mark_stack_slots_dynptr()
730 err = destroy_if_dynptr_stack_slot(env, state, spi - 1); in mark_stack_slots_dynptr()
735 state->stack[spi].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
736 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
741 return -EINVAL; in mark_stack_slots_dynptr()
743 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, in mark_stack_slots_dynptr()
744 &state->stack[spi - 1].spilled_ptr, type); in mark_stack_slots_dynptr()
758 state->stack[spi].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
759 state->stack[spi - 1].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
762 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in mark_stack_slots_dynptr()
763 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in mark_stack_slots_dynptr()
773 state->stack[spi].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
774 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
777 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in invalidate_dynptr()
778 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in invalidate_dynptr()
786 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of in invalidate_dynptr()
791 * default (where the default reg state has its reg->parent as NULL), or in invalidate_dynptr()
793 * mark_reg_read won't walk reg->parent chain), but not randomly during in invalidate_dynptr()
795 * parentage chain will still be live (i.e. reg->parent may be in invalidate_dynptr()
796 * non-NULL), while earlier reg->parent was NULL, so we need in invalidate_dynptr()
801 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in invalidate_dynptr()
802 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in invalidate_dynptr()
814 if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in unmark_stack_slots_dynptr()
819 ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id; in unmark_stack_slots_dynptr()
832 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
833 if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id) in unmark_stack_slots_dynptr()
840 if (state->stack[i].slot_type[0] != STACK_DYNPTR) { in unmark_stack_slots_dynptr()
842 return -EFAULT; in unmark_stack_slots_dynptr()
844 if (state->stack[i].spilled_ptr.dynptr.first_slot) in unmark_stack_slots_dynptr()
856 if (!env->allow_ptr_leaks) in mark_reg_invalid()
874 if (state->stack[spi].slot_type[0] != STACK_DYNPTR) in destroy_if_dynptr_stack_slot()
878 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in destroy_if_dynptr_stack_slot()
881 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in destroy_if_dynptr_stack_slot()
883 return -EINVAL; in destroy_if_dynptr_stack_slot()
887 mark_stack_slot_scratched(env, spi - 1); in destroy_if_dynptr_stack_slot()
891 state->stack[spi].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
892 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
895 dynptr_id = state->stack[spi].spilled_ptr.id; in destroy_if_dynptr_stack_slot()
897 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ in destroy_if_dynptr_stack_slot()
899 if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) in destroy_if_dynptr_stack_slot()
901 if (dreg->dynptr_id == dynptr_id) in destroy_if_dynptr_stack_slot()
908 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in destroy_if_dynptr_stack_slot()
909 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in destroy_if_dynptr_stack_slot()
912 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in destroy_if_dynptr_stack_slot()
913 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in destroy_if_dynptr_stack_slot()
922 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_uninit()
927 /* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an in is_dynptr_reg_valid_uninit()
931 if (spi < 0 && spi != -ERANGE) in is_dynptr_reg_valid_uninit()
957 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_init()
963 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in is_dynptr_reg_valid_init()
967 if (state->stack[spi].slot_type[i] != STACK_DYNPTR || in is_dynptr_reg_valid_init()
968 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) in is_dynptr_reg_valid_init()
987 if (reg->type == CONST_PTR_TO_DYNPTR) { in is_dynptr_type_expected()
988 return reg->dynptr.type == dynptr_type; in is_dynptr_type_expected()
993 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; in is_dynptr_type_expected()
1020 struct bpf_stack_state *slot = &state->stack[spi - i]; in mark_stack_slots_iter()
1021 struct bpf_reg_state *st = &slot->spilled_ptr; in mark_stack_slots_iter()
1024 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ in mark_stack_slots_iter()
1027 st->type |= MEM_RCU; in mark_stack_slots_iter()
1029 st->type |= PTR_UNTRUSTED; in mark_stack_slots_iter()
1031 st->live |= REG_LIVE_WRITTEN; in mark_stack_slots_iter()
1032 st->ref_obj_id = i == 0 ? id : 0; in mark_stack_slots_iter()
1033 st->iter.btf = btf; in mark_stack_slots_iter()
1034 st->iter.btf_id = btf_id; in mark_stack_slots_iter()
1035 st->iter.state = BPF_ITER_STATE_ACTIVE; in mark_stack_slots_iter()
1036 st->iter.depth = 0; in mark_stack_slots_iter()
1039 slot->slot_type[j] = STACK_ITER; in mark_stack_slots_iter()
1041 mark_stack_slot_scratched(env, spi - i); in mark_stack_slots_iter()
1058 struct bpf_stack_state *slot = &state->stack[spi - i]; in unmark_stack_slots_iter()
1059 struct bpf_reg_state *st = &slot->spilled_ptr; in unmark_stack_slots_iter()
1062 WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); in unmark_stack_slots_iter()
1067 st->live |= REG_LIVE_WRITTEN; in unmark_stack_slots_iter()
1070 slot->slot_type[j] = STACK_INVALID; in unmark_stack_slots_iter()
1072 mark_stack_slot_scratched(env, spi - i); in unmark_stack_slots_iter()
1084 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we in is_iter_reg_valid_uninit()
1089 if (spi == -ERANGE) in is_iter_reg_valid_uninit()
1095 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_uninit()
1098 if (slot->slot_type[j] == STACK_ITER) in is_iter_reg_valid_uninit()
1113 return -EINVAL; in is_iter_reg_valid_init()
1116 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_init()
1117 struct bpf_reg_state *st = &slot->spilled_ptr; in is_iter_reg_valid_init()
1119 if (st->type & PTR_UNTRUSTED) in is_iter_reg_valid_init()
1120 return -EPROTO; in is_iter_reg_valid_init()
1122 if (i == 0 && !st->ref_obj_id) in is_iter_reg_valid_init()
1123 return -EINVAL; in is_iter_reg_valid_init()
1124 if (i != 0 && st->ref_obj_id) in is_iter_reg_valid_init()
1125 return -EINVAL; in is_iter_reg_valid_init()
1126 if (st->iter.btf != btf || st->iter.btf_id != btf_id) in is_iter_reg_valid_init()
1127 return -EINVAL; in is_iter_reg_valid_init()
1130 if (slot->slot_type[j] != STACK_ITER) in is_iter_reg_valid_init()
1131 return -EINVAL; in is_iter_reg_valid_init()
1138 * - spilled register state (STACK_SPILL);
1139 * - dynptr state (STACK_DYNPTR);
1140 * - iter state (STACK_ITER).
1144 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; in is_stack_slot_special()
1166 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1171 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && in is_spilled_scalar_reg()
1172 stack->spilled_ptr.type == SCALAR_VALUE; in is_spilled_scalar_reg()
1177 return stack->slot_type[0] == STACK_SPILL && in is_spilled_scalar_reg64()
1178 stack->spilled_ptr.type == SCALAR_VALUE; in is_spilled_scalar_reg64()
1185 * env->allow_ptr_leaks into account and force STACK_MISC, if necessary.
1191 if (env->allow_ptr_leaks && *stype == STACK_INVALID) in mark_stack_slot_misc()
1255 memset(arr + old_n * size, 0, (new_n - old_n) * size); in realloc_array()
1263 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, in copy_reference_state()
1265 if (!dst->refs) in copy_reference_state()
1266 return -ENOMEM; in copy_reference_state()
1268 dst->acquired_refs = src->acquired_refs; in copy_reference_state()
1274 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1276 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), in copy_stack_state()
1278 if (!dst->stack) in copy_stack_state()
1279 return -ENOMEM; in copy_stack_state()
1281 dst->allocated_stack = src->allocated_stack; in copy_stack_state()
1287 state->refs = realloc_array(state->refs, state->acquired_refs, n, in resize_reference_state()
1289 if (!state->refs) in resize_reference_state()
1290 return -ENOMEM; in resize_reference_state()
1292 state->acquired_refs = n; in resize_reference_state()
1296 /* Possibly update state->allocated_stack to be at least size bytes. Also
1297 * possibly update the function's high-water mark in its bpf_subprog_info.
1301 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n; in grow_stack_state()
1310 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); in grow_stack_state()
1311 if (!state->stack) in grow_stack_state()
1312 return -ENOMEM; in grow_stack_state()
1314 state->allocated_stack = size; in grow_stack_state()
1317 if (env->subprog_info[state->subprogno].stack_depth < size) in grow_stack_state()
1318 env->subprog_info[state->subprogno].stack_depth = size; in grow_stack_state()
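/*
 * Worked example (assuming grow_stack_state() rounds the requested size up to
 * BPF_REG_SIZE, as the full source does): a write touching fp-20 requests
 * 20 bytes, allocated_stack grows to 24 (3 slots) and, per the comment above,
 * the subprog's stack_depth high-water mark is raised to at least 24 as well.
 */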
1323 /* Acquire a pointer id from the env and update the state->refs to include
1331 int new_ofs = state->acquired_refs; in acquire_reference_state()
1334 err = resize_reference_state(state, state->acquired_refs + 1); in acquire_reference_state()
1337 id = ++env->id_gen; in acquire_reference_state()
1338 state->refs[new_ofs].id = id; in acquire_reference_state()
1339 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
1340 state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0; in acquire_reference_state()
1350 last_idx = state->acquired_refs - 1; in release_reference_state()
1351 for (i = 0; i < state->acquired_refs; i++) { in release_reference_state()
1352 if (state->refs[i].id == ptr_id) { in release_reference_state()
1354 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) in release_reference_state()
1355 return -EINVAL; in release_reference_state()
1357 memcpy(&state->refs[i], &state->refs[last_idx], in release_reference_state()
1358 sizeof(*state->refs)); in release_reference_state()
1359 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); in release_reference_state()
1360 state->acquired_refs--; in release_reference_state()
1364 return -EINVAL; in release_reference_state()
1371 kfree(state->refs); in free_func_state()
1372 kfree(state->stack); in free_func_state()
1378 kfree(state->jmp_history); in clear_jmp_history()
1379 state->jmp_history = NULL; in clear_jmp_history()
1380 state->jmp_history_cnt = 0; in clear_jmp_history()
1388 for (i = 0; i <= state->curframe; i++) { in free_verifier_state()
1389 free_func_state(state->frame[i]); in free_verifier_state()
1390 state->frame[i] = NULL; in free_verifier_state()
1418 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, in copy_verifier_state()
1419 src->jmp_history_cnt, sizeof(*dst_state->jmp_history), in copy_verifier_state()
1421 if (!dst_state->jmp_history) in copy_verifier_state()
1422 return -ENOMEM; in copy_verifier_state()
1423 dst_state->jmp_history_cnt = src->jmp_history_cnt; in copy_verifier_state()
1428 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { in copy_verifier_state()
1429 free_func_state(dst_state->frame[i]); in copy_verifier_state()
1430 dst_state->frame[i] = NULL; in copy_verifier_state()
1432 dst_state->speculative = src->speculative; in copy_verifier_state()
1433 dst_state->active_rcu_lock = src->active_rcu_lock; in copy_verifier_state()
1434 dst_state->active_preempt_lock = src->active_preempt_lock; in copy_verifier_state()
1435 dst_state->in_sleepable = src->in_sleepable; in copy_verifier_state()
1436 dst_state->curframe = src->curframe; in copy_verifier_state()
1437 dst_state->active_lock.ptr = src->active_lock.ptr; in copy_verifier_state()
1438 dst_state->active_lock.id = src->active_lock.id; in copy_verifier_state()
1439 dst_state->branches = src->branches; in copy_verifier_state()
1440 dst_state->parent = src->parent; in copy_verifier_state()
1441 dst_state->first_insn_idx = src->first_insn_idx; in copy_verifier_state()
1442 dst_state->last_insn_idx = src->last_insn_idx; in copy_verifier_state()
1443 dst_state->dfs_depth = src->dfs_depth; in copy_verifier_state()
1444 dst_state->callback_unroll_depth = src->callback_unroll_depth; in copy_verifier_state()
1445 dst_state->used_as_loop_entry = src->used_as_loop_entry; in copy_verifier_state()
1446 dst_state->may_goto_depth = src->may_goto_depth; in copy_verifier_state()
1447 for (i = 0; i <= src->curframe; i++) { in copy_verifier_state()
1448 dst = dst_state->frame[i]; in copy_verifier_state()
1452 return -ENOMEM; in copy_verifier_state()
1453 dst_state->frame[i] = dst; in copy_verifier_state()
1455 err = copy_func_state(dst, src->frame[i]); in copy_verifier_state()
1464 return env->prog->len; in state_htab_size()
1469 struct bpf_verifier_state *cur = env->cur_state; in explored_state()
1470 struct bpf_func_state *state = cur->frame[cur->curframe]; in explored_state()
1472 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
1479 if (a->curframe != b->curframe) in same_callsites()
1482 for (fr = a->curframe; fr >= 0; fr--) in same_callsites()
1483 if (a->frame[fr]->callsite != b->frame[fr]->callsite) in same_callsites()
1489 /* Open coded iterators allow back-edges in the state graph in order to
1493 * part of some loops in order to decide whether non-exact states
1495 * - non-exact states comparison establishes sub-state relation and uses
1498 * - exact states comparison just checks if current and explored states
1499 * are identical (and thus form a back-edge).
1523 * ... ... .---------> hdr
1526 * cur .-> succ | .------...
1529 * succ '-- cur | ... ...
1532 * | succ <- cur
1537 * '----'
1549 * .------... .------...
1552 * .-> hdr ... ... ...
1555 * | succ <- cur succ <- cur
1560 * '----' exit
1604 * - use st->branch == 0 as a signal that DFS of succ had been finished
1607 * - use st->branch > 0 as a signal that st is in the current DFS path;
1608 * - handle cases B and C in is_state_visited();
1609 * - update topmost loop entry for intermediate states in get_loop_entry().
1613 struct bpf_verifier_state *topmost = st->loop_entry, *old; in get_loop_entry()
1615 while (topmost && topmost->loop_entry && topmost != topmost->loop_entry) in get_loop_entry()
1616 topmost = topmost->loop_entry; in get_loop_entry()
1620 while (st && st->loop_entry != topmost) { in get_loop_entry()
1621 old = st->loop_entry; in get_loop_entry()
1622 st->loop_entry = topmost; in get_loop_entry()
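/*
 * Worked example for get_loop_entry() above (illustrative, not from the
 * source): given loop_entry links st -> A -> B -> hdr with hdr->loop_entry
 * NULL or pointing to itself, the first loop resolves topmost = hdr, and the
 * second loop rewrites st->loop_entry and A->loop_entry to point directly at
 * hdr (B already does), compressing the path like find() in a union-find.
 */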
1634 /* The hdr1->branches check decides between cases B and C in in update_loop_entry()
1635 * comment for get_loop_entry(). If hdr1->branches == 0 then in update_loop_entry()
1638 * no need to update cur->loop_entry. in update_loop_entry()
1640 if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) { in update_loop_entry()
1641 cur->loop_entry = hdr; in update_loop_entry()
1642 hdr->used_as_loop_entry = true; in update_loop_entry()
1649 u32 br = --st->branches; in update_branch_counts()
1656 if (br == 0 && st->parent && st->loop_entry) in update_branch_counts()
1657 update_loop_entry(st->parent, st->loop_entry); in update_branch_counts()
1667 st = st->parent; in update_branch_counts()
1674 struct bpf_verifier_state *cur = env->cur_state; in pop_stack()
1675 struct bpf_verifier_stack_elem *elem, *head = env->head; in pop_stack()
1678 if (env->head == NULL) in pop_stack()
1679 return -ENOENT; in pop_stack()
1682 err = copy_verifier_state(cur, &head->st); in pop_stack()
1687 bpf_vlog_reset(&env->log, head->log_pos); in pop_stack()
1689 *insn_idx = head->insn_idx; in pop_stack()
1691 *prev_insn_idx = head->prev_insn_idx; in pop_stack()
1692 elem = head->next; in pop_stack()
1693 free_verifier_state(&head->st, false); in pop_stack()
1695 env->head = elem; in pop_stack()
1696 env->stack_size--; in pop_stack()
1704 struct bpf_verifier_state *cur = env->cur_state; in push_stack()
1712 elem->insn_idx = insn_idx; in push_stack()
1713 elem->prev_insn_idx = prev_insn_idx; in push_stack()
1714 elem->next = env->head; in push_stack()
1715 elem->log_pos = env->log.end_pos; in push_stack()
1716 env->head = elem; in push_stack()
1717 env->stack_size++; in push_stack()
1718 err = copy_verifier_state(&elem->st, cur); in push_stack()
1721 elem->st.speculative |= speculative; in push_stack()
1722 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_stack()
1724 env->stack_size); in push_stack()
1727 if (elem->st.parent) { in push_stack()
1728 ++elem->st.parent->branches; in push_stack()
1731 * 1. speculative states will bump 'branches' for non-branch in push_stack()
1739 return &elem->st; in push_stack()
1741 free_verifier_state(env->cur_state, true); in push_stack()
1742 env->cur_state = NULL; in push_stack()
1753 /* This helper doesn't clear reg->id */
1756 reg->var_off = tnum_const(imm); in ___mark_reg_known()
1757 reg->smin_value = (s64)imm; in ___mark_reg_known()
1758 reg->smax_value = (s64)imm; in ___mark_reg_known()
1759 reg->umin_value = imm; in ___mark_reg_known()
1760 reg->umax_value = imm; in ___mark_reg_known()
1762 reg->s32_min_value = (s32)imm; in ___mark_reg_known()
1763 reg->s32_max_value = (s32)imm; in ___mark_reg_known()
1764 reg->u32_min_value = (u32)imm; in ___mark_reg_known()
1765 reg->u32_max_value = (u32)imm; in ___mark_reg_known()
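/*
 * Worked example for ___mark_reg_known() above: imm = (u64)-1 yields
 * var_off = const 0xffffffffffffffff, smin = smax = -1,
 * umin = umax = 0xffffffffffffffff, s32_min = s32_max = -1 and
 * u32_min = u32_max = 0xffffffff; every bound collapses to the same constant.
 */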
1774 memset(((u8 *)reg) + sizeof(reg->type), 0, in __mark_reg_known()
1775 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); in __mark_reg_known()
1776 reg->id = 0; in __mark_reg_known()
1777 reg->ref_obj_id = 0; in __mark_reg_known()
1783 reg->var_off = tnum_const_subreg(reg->var_off, imm); in __mark_reg32_known()
1784 reg->s32_min_value = (s32)imm; in __mark_reg32_known()
1785 reg->s32_max_value = (s32)imm; in __mark_reg32_known()
1786 reg->u32_min_value = (u32)imm; in __mark_reg32_known()
1787 reg->u32_max_value = (u32)imm; in __mark_reg32_known()
1801 reg->type = SCALAR_VALUE; in __mark_reg_const_zero()
1805 reg->precise = !env->bpf_capable; in __mark_reg_const_zero()
1824 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for in __mark_dynptr_reg()
1829 reg->type = CONST_PTR_TO_DYNPTR; in __mark_dynptr_reg()
1831 reg->id = dynptr_id; in __mark_dynptr_reg()
1832 reg->dynptr.type = type; in __mark_dynptr_reg()
1833 reg->dynptr.first_slot = first_slot; in __mark_dynptr_reg()
1838 if (base_type(reg->type) == PTR_TO_MAP_VALUE) { in mark_ptr_not_null_reg()
1839 const struct bpf_map *map = reg->map_ptr; in mark_ptr_not_null_reg()
1841 if (map->inner_map_meta) { in mark_ptr_not_null_reg()
1842 reg->type = CONST_PTR_TO_MAP; in mark_ptr_not_null_reg()
1843 reg->map_ptr = map->inner_map_meta; in mark_ptr_not_null_reg()
1847 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER)) in mark_ptr_not_null_reg()
1848 reg->map_uid = reg->id; in mark_ptr_not_null_reg()
1849 if (btf_record_has_field(map->inner_map_meta->record, BPF_WORKQUEUE)) in mark_ptr_not_null_reg()
1850 reg->map_uid = reg->id; in mark_ptr_not_null_reg()
1851 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { in mark_ptr_not_null_reg()
1852 reg->type = PTR_TO_XDP_SOCK; in mark_ptr_not_null_reg()
1853 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || in mark_ptr_not_null_reg()
1854 map->map_type == BPF_MAP_TYPE_SOCKHASH) { in mark_ptr_not_null_reg()
1855 reg->type = PTR_TO_SOCKET; in mark_ptr_not_null_reg()
1857 reg->type = PTR_TO_MAP_VALUE; in mark_ptr_not_null_reg()
1862 reg->type &= ~PTR_MAYBE_NULL; in mark_ptr_not_null_reg()
1870 regs[regno].btf = ds_head->btf; in mark_reg_graph_node()
1871 regs[regno].btf_id = ds_head->value_btf_id; in mark_reg_graph_node()
1872 regs[regno].off = ds_head->node_offset; in mark_reg_graph_node()
1877 return type_is_pkt_pointer(reg->type); in reg_is_pkt_pointer()
1883 reg->type == PTR_TO_PACKET_END; in reg_is_pkt_pointer_any()
1888 return base_type(reg->type) == PTR_TO_MEM && in reg_is_dynptr_slice_pkt()
1889 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP); in reg_is_dynptr_slice_pkt()
1900 return reg->type == which && in reg_is_init_pkt_pointer()
1901 reg->id == 0 && in reg_is_init_pkt_pointer()
1902 reg->off == 0 && in reg_is_init_pkt_pointer()
1903 tnum_equals_const(reg->var_off, 0); in reg_is_init_pkt_pointer()
1909 reg->smin_value = S64_MIN; in __mark_reg_unbounded()
1910 reg->smax_value = S64_MAX; in __mark_reg_unbounded()
1911 reg->umin_value = 0; in __mark_reg_unbounded()
1912 reg->umax_value = U64_MAX; in __mark_reg_unbounded()
1914 reg->s32_min_value = S32_MIN; in __mark_reg_unbounded()
1915 reg->s32_max_value = S32_MAX; in __mark_reg_unbounded()
1916 reg->u32_min_value = 0; in __mark_reg_unbounded()
1917 reg->u32_max_value = U32_MAX; in __mark_reg_unbounded()
1922 reg->smin_value = S64_MIN; in __mark_reg64_unbounded()
1923 reg->smax_value = S64_MAX; in __mark_reg64_unbounded()
1924 reg->umin_value = 0; in __mark_reg64_unbounded()
1925 reg->umax_value = U64_MAX; in __mark_reg64_unbounded()
1930 reg->s32_min_value = S32_MIN; in __mark_reg32_unbounded()
1931 reg->s32_max_value = S32_MAX; in __mark_reg32_unbounded()
1932 reg->u32_min_value = 0; in __mark_reg32_unbounded()
1933 reg->u32_max_value = U32_MAX; in __mark_reg32_unbounded()
1938 struct tnum var32_off = tnum_subreg(reg->var_off); in __update_reg32_bounds()
1941 reg->s32_min_value = max_t(s32, reg->s32_min_value, in __update_reg32_bounds()
1944 reg->s32_max_value = min_t(s32, reg->s32_max_value, in __update_reg32_bounds()
1946 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); in __update_reg32_bounds()
1947 reg->u32_max_value = min(reg->u32_max_value, in __update_reg32_bounds()
1954 reg->smin_value = max_t(s64, reg->smin_value, in __update_reg64_bounds()
1955 reg->var_off.value | (reg->var_off.mask & S64_MIN)); in __update_reg64_bounds()
1957 reg->smax_value = min_t(s64, reg->smax_value, in __update_reg64_bounds()
1958 reg->var_off.value | (reg->var_off.mask & S64_MAX)); in __update_reg64_bounds()
1959 reg->umin_value = max(reg->umin_value, reg->var_off.value); in __update_reg64_bounds()
1960 reg->umax_value = min(reg->umax_value, in __update_reg64_bounds()
1961 reg->var_off.value | reg->var_off.mask); in __update_reg64_bounds()
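/*
 * Worked example for __update_reg64_bounds() above: with
 * var_off = {.value = 0x10, .mask = 0x0f} (bit 4 known set, bits 0-3 unknown)
 * the known bits alone bound the register to [0x10, 0x1f], so
 * umin = max(umin, 0x10) and umax = min(umax, 0x10 | 0x0f) = min(umax, 0x1f);
 * the sign bit is known clear (mask & S64_MIN == 0), so the signed bounds
 * tighten to the same [0x10, 0x1f] window.
 */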
1970 /* Uses signed min/max values to inform unsigned, and vice-versa */
1978 * [10, 20] range. But this property holds for any 64-bit range as in __reg32_deduce_bounds()
1989 * depends on actual hexadecimal values of 32-bit range. They can form in __reg32_deduce_bounds()
1994 if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) { in __reg32_deduce_bounds()
1998 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value); in __reg32_deduce_bounds()
1999 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value); in __reg32_deduce_bounds()
2001 if ((s32)reg->umin_value <= (s32)reg->umax_value) { in __reg32_deduce_bounds()
2002 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
2003 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
2006 if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) { in __reg32_deduce_bounds()
2008 if ((u32)reg->smin_value <= (u32)reg->smax_value) { in __reg32_deduce_bounds()
2009 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value); in __reg32_deduce_bounds()
2010 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value); in __reg32_deduce_bounds()
2013 if ((s32)reg->smin_value <= (s32)reg->smax_value) { in __reg32_deduce_bounds()
2014 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
2015 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
2019 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to in __reg32_deduce_bounds()
2022 * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]). in __reg32_deduce_bounds()
2023 * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff, in __reg32_deduce_bounds()
2025 * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]). in __reg32_deduce_bounds()
2029 * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister. in __reg32_deduce_bounds()
2031 if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) && in __reg32_deduce_bounds()
2032 (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) { in __reg32_deduce_bounds()
2033 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
2034 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
2036 if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) && in __reg32_deduce_bounds()
2037 (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) { in __reg32_deduce_bounds()
2038 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
2039 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
2044 if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) { in __reg32_deduce_bounds()
2045 reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2046 reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
2050 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg32_deduce_bounds()
2052 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { in __reg32_deduce_bounds()
2053 reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2054 reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
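/*
 * Worked example for __reg32_deduce_bounds() above: a u64 range
 * [0x100000005, 0x100000009] has identical upper 32 bits, so the low 32 bits
 * form the valid u32 range [5, 9]; and since (s32)5 <= (s32)9, the same pair
 * also serves as the s32 range [5, 9].
 */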
2065 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2073 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2074 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2077 * contiguous to the right of it, wrapping around from -1 to 0, and in __reg64_deduce_bounds()
2080 * more visually as mapped to sign-agnostic range of hex values. in __reg64_deduce_bounds()
2086 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2087 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2089 * >------------------------------ -------------------------------> in __reg64_deduce_bounds()
2099 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2105 * will be non-negative both as u64 and s64 (and in fact it will be in __reg64_deduce_bounds()
2108 * non-negative range of values larger than 0x8000000000000000. in __reg64_deduce_bounds()
2127 if ((s64)reg->umin_value <= (s64)reg->umax_value) { in __reg64_deduce_bounds()
2128 reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2129 reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
2133 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg64_deduce_bounds()
2135 if ((u64)reg->smin_value <= (u64)reg->smax_value) { in __reg64_deduce_bounds()
2136 reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2137 reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
2143 /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit in __reg_deduce_mixed_bounds()
2144 * values on both sides of 64-bit range in hope to have tighter range. in __reg_deduce_mixed_bounds()
2146 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff]. in __reg_deduce_mixed_bounds()
2147 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound in __reg_deduce_mixed_bounds()
2148 * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of in __reg_deduce_mixed_bounds()
2149 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a in __reg_deduce_mixed_bounds()
2152 * with are well-formed ranges in respective s64 or u64 domain, just in __reg_deduce_mixed_bounds()
2153 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments. in __reg_deduce_mixed_bounds()
2158 /* u32 -> u64 tightening, it's always well-formed */ in __reg_deduce_mixed_bounds()
2159 new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2160 new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2161 reg->umin_value = max_t(u64, reg->umin_value, new_umin); in __reg_deduce_mixed_bounds()
2162 reg->umax_value = min_t(u64, reg->umax_value, new_umax); in __reg_deduce_mixed_bounds()
2163 /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */ in __reg_deduce_mixed_bounds()
2164 new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2165 new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2166 reg->smin_value = max_t(s64, reg->smin_value, new_smin); in __reg_deduce_mixed_bounds()
2167 reg->smax_value = min_t(s64, reg->smax_value, new_smax); in __reg_deduce_mixed_bounds()
2170 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { in __reg_deduce_mixed_bounds()
2171 /* s32 -> u64 tightening */ in __reg_deduce_mixed_bounds()
2172 new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; in __reg_deduce_mixed_bounds()
2173 new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; in __reg_deduce_mixed_bounds()
2174 reg->umin_value = max_t(u64, reg->umin_value, new_umin); in __reg_deduce_mixed_bounds()
2175 reg->umax_value = min_t(u64, reg->umax_value, new_umax); in __reg_deduce_mixed_bounds()
2176 /* s32 -> s64 tightening */ in __reg_deduce_mixed_bounds()
2177 new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; in __reg_deduce_mixed_bounds()
2178 new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; in __reg_deduce_mixed_bounds()
2179 reg->smin_value = max_t(s64, reg->smin_value, new_smin); in __reg_deduce_mixed_bounds()
2180 reg->smax_value = min_t(s64, reg->smax_value, new_smax); in __reg_deduce_mixed_bounds()
2184 * when upper bits for a 64-bit range are all 1s or all 0s. in __reg_deduce_mixed_bounds()
2195 * Also suppose that its 32-bit range is positive, in __reg_deduce_mixed_bounds()
2196 * meaning that lower 32-bits of the full 64-bit register in __reg_deduce_mixed_bounds()
2204 * which means that upper bits of the full 64-bit register in __reg_deduce_mixed_bounds()
2208 * - 0xffff_ffff_8000_0000 == (s64)S32_MIN in __reg_deduce_mixed_bounds()
2209 * - 0x0000_0000_7fff_ffff == (s64)S32_MAX in __reg_deduce_mixed_bounds()
2212 if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) { in __reg_deduce_mixed_bounds()
2213 reg->smin_value = reg->s32_min_value; in __reg_deduce_mixed_bounds()
2214 reg->smax_value = reg->s32_max_value; in __reg_deduce_mixed_bounds()
2215 reg->umin_value = reg->s32_min_value; in __reg_deduce_mixed_bounds()
2216 reg->umax_value = reg->s32_max_value; in __reg_deduce_mixed_bounds()
2217 reg->var_off = tnum_intersect(reg->var_off, in __reg_deduce_mixed_bounds()
2218 tnum_range(reg->smin_value, reg->smax_value)); in __reg_deduce_mixed_bounds()
2232 struct tnum var64_off = tnum_intersect(reg->var_off, in __reg_bound_offset()
2233 tnum_range(reg->umin_value, in __reg_bound_offset()
2234 reg->umax_value)); in __reg_bound_offset()
2236 tnum_range(reg->u32_min_value, in __reg_bound_offset()
2237 reg->u32_max_value)); in __reg_bound_offset()
2239 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); in __reg_bound_offset()
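/*
 * Worked example for __reg_bound_offset() above: with umin = 4 and umax = 7,
 * tnum_range(4, 7) is {.value = 0x4, .mask = 0x3} (bit 2 known set, bits 0-1
 * unknown); intersecting it with the existing var_off can only clear more
 * unknown bits, it never widens what is already known.
 */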
2263 if (reg->umin_value > reg->umax_value || in reg_bounds_sanity_check()
2264 reg->smin_value > reg->smax_value || in reg_bounds_sanity_check()
2265 reg->u32_min_value > reg->u32_max_value || in reg_bounds_sanity_check()
2266 reg->s32_min_value > reg->s32_max_value) { in reg_bounds_sanity_check()
2271 if (tnum_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2272 u64 uval = reg->var_off.value; in reg_bounds_sanity_check()
2275 if (reg->umin_value != uval || reg->umax_value != uval || in reg_bounds_sanity_check()
2276 reg->smin_value != sval || reg->smax_value != sval) { in reg_bounds_sanity_check()
2282 if (tnum_subreg_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2283 u32 uval32 = tnum_subreg(reg->var_off).value; in reg_bounds_sanity_check()
2286 if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 || in reg_bounds_sanity_check()
2287 reg->s32_min_value != sval32 || reg->s32_max_value != sval32) { in reg_bounds_sanity_check()
2297 ctx, msg, reg->umin_value, reg->umax_value, in reg_bounds_sanity_check()
2298 reg->smin_value, reg->smax_value, in reg_bounds_sanity_check()
2299 reg->u32_min_value, reg->u32_max_value, in reg_bounds_sanity_check()
2300 reg->s32_min_value, reg->s32_max_value, in reg_bounds_sanity_check()
2301 reg->var_off.value, reg->var_off.mask); in reg_bounds_sanity_check()
2302 if (env->test_reg_invariants) in reg_bounds_sanity_check()
2303 return -EFAULT; in reg_bounds_sanity_check()
2315 reg->umin_value = reg->u32_min_value; in __reg_assign_32_into_64()
2316 reg->umax_value = reg->u32_max_value; in __reg_assign_32_into_64()
2318 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must in __reg_assign_32_into_64()
2322 if (__reg32_bound_s64(reg->s32_min_value) && in __reg_assign_32_into_64()
2323 __reg32_bound_s64(reg->s32_max_value)) { in __reg_assign_32_into_64()
2324 reg->smin_value = reg->s32_min_value; in __reg_assign_32_into_64()
2325 reg->smax_value = reg->s32_max_value; in __reg_assign_32_into_64()
2327 reg->smin_value = 0; in __reg_assign_32_into_64()
2328 reg->smax_value = U32_MAX; in __reg_assign_32_into_64()
2340 reg->type = SCALAR_VALUE; in __mark_reg_unknown_imprecise()
2341 reg->id = 0; in __mark_reg_unknown_imprecise()
2342 reg->ref_obj_id = 0; in __mark_reg_unknown_imprecise()
2343 reg->var_off = tnum_unknown; in __mark_reg_unknown_imprecise()
2344 reg->frameno = 0; in __mark_reg_unknown_imprecise()
2345 reg->precise = false; in __mark_reg_unknown_imprecise()
2356 reg->precise = !env->bpf_capable; in __mark_reg_unknown()
2380 reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min); in __mark_reg_s32_range()
2381 reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max); in __mark_reg_s32_range()
2383 reg->smin_value = max_t(s64, reg->smin_value, s32_min); in __mark_reg_s32_range()
2384 reg->smax_value = min_t(s64, reg->smax_value, s32_max); in __mark_reg_s32_range()
2395 reg->type = NOT_INIT; in __mark_reg_not_init()
2426 regs[regno].id = ++env->id_gen; in mark_btf_ld_reg()
2433 struct bpf_reg_state *regs = state->regs; in init_reg_state()
2446 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
2454 #define BPF_MAIN_FUNC (-1)
2459 state->callsite = callsite; in init_func_state()
2460 state->frameno = frameno; in init_func_state()
2461 state->subprogno = subprogno; in init_func_state()
2462 state->callback_ret_range = retval_range(0, 0); in init_func_state()
2479 elem->insn_idx = insn_idx; in push_async_cb()
2480 elem->prev_insn_idx = prev_insn_idx; in push_async_cb()
2481 elem->next = env->head; in push_async_cb()
2482 elem->log_pos = env->log.end_pos; in push_async_cb()
2483 env->head = elem; in push_async_cb()
2484 env->stack_size++; in push_async_cb()
2485 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_async_cb()
2488 env->stack_size); in push_async_cb()
2496 elem->st.branches = 1; in push_async_cb()
2497 elem->st.in_sleepable = is_sleepable; in push_async_cb()
2505 elem->st.frame[0] = frame; in push_async_cb()
2506 return &elem->st; in push_async_cb()
2508 free_verifier_state(env->cur_state, true); in push_async_cb()
2509 env->cur_state = NULL; in push_async_cb()
2524 return ((struct bpf_subprog_info *)a)->start - in cmp_subprogs()
2525 ((struct bpf_subprog_info *)b)->start; in cmp_subprogs()
2532 p = bsearch(&off, env->subprog_info, env->subprog_cnt, in find_subprog()
2533 sizeof(env->subprog_info[0]), cmp_subprogs); in find_subprog()
2535 return -ENOENT; in find_subprog()
2536 return p - env->subprog_info; in find_subprog()
2542 int insn_cnt = env->prog->len; in add_subprog()
2547 return -EINVAL; in add_subprog()
2552 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { in add_subprog()
2554 return -E2BIG; in add_subprog()
2557 env->subprog_info[env->subprog_cnt++].start = off; in add_subprog()
2558 sort(env->subprog_info, env->subprog_cnt, in add_subprog()
2559 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); in add_subprog()
2560 return env->subprog_cnt - 1; in add_subprog()
2565 struct bpf_prog_aux *aux = env->prog->aux; in bpf_find_exception_callback_insn_off()
2566 struct btf *btf = aux->btf; in bpf_find_exception_callback_insn_off()
2572 /* Non-zero func_info_cnt implies valid btf */ in bpf_find_exception_callback_insn_off()
2573 if (!aux->func_info_cnt) in bpf_find_exception_callback_insn_off()
2575 main_btf_id = aux->func_info[0].type_id; in bpf_find_exception_callback_insn_off()
2580 return -EINVAL; in bpf_find_exception_callback_insn_off()
2583 name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:"); in bpf_find_exception_callback_insn_off()
2587 if (ret == -ENOENT) in bpf_find_exception_callback_insn_off()
2589 else if (ret == -EEXIST) in bpf_find_exception_callback_insn_off()
2603 return -EINVAL; in bpf_find_exception_callback_insn_off()
2606 for (i = 0; i < aux->func_info_cnt; i++) { in bpf_find_exception_callback_insn_off()
2607 if (aux->func_info[i].type_id != id) in bpf_find_exception_callback_insn_off()
2609 ret = aux->func_info[i].insn_off; in bpf_find_exception_callback_insn_off()
2615 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
2620 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
2663 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; in kfunc_desc_cmp_by_id_off()
2671 return d0->offset - d1->offset; in kfunc_btf_cmp_by_off()
2683 tab = prog->aux->kfunc_tab; in find_kfunc_desc()
2684 return bsearch(&desc, tab->descs, tab->nr_descs, in find_kfunc_desc()
2685 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); in find_kfunc_desc()
2695 return -EFAULT; in bpf_get_kfunc_addr()
2697 *func_addr = (u8 *)desc->addr; in bpf_get_kfunc_addr()
2711 tab = env->prog->aux->kfunc_btf_tab; in __find_kfunc_desc_btf()
2712 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, in __find_kfunc_desc_btf()
2713 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); in __find_kfunc_desc_btf()
2715 if (tab->nr_descs == MAX_KFUNC_BTFS) { in __find_kfunc_desc_btf()
2717 return ERR_PTR(-E2BIG); in __find_kfunc_desc_btf()
2720 if (bpfptr_is_null(env->fd_array)) { in __find_kfunc_desc_btf()
2722 return ERR_PTR(-EPROTO); in __find_kfunc_desc_btf()
2725 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, in __find_kfunc_desc_btf()
2728 return ERR_PTR(-EFAULT); in __find_kfunc_desc_btf()
2739 return ERR_PTR(-EINVAL); in __find_kfunc_desc_btf()
2745 return ERR_PTR(-ENXIO); in __find_kfunc_desc_btf()
2748 b = &tab->descs[tab->nr_descs++]; in __find_kfunc_desc_btf()
2749 b->btf = btf; in __find_kfunc_desc_btf()
2750 b->module = mod; in __find_kfunc_desc_btf()
2751 b->offset = offset; in __find_kfunc_desc_btf()
2756 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in __find_kfunc_desc_btf()
2759 btf = b->btf; in __find_kfunc_desc_btf()
2770 while (tab->nr_descs--) { in bpf_free_kfunc_btf_tab()
2771 module_put(tab->descs[tab->nr_descs].module); in bpf_free_kfunc_btf_tab()
2772 btf_put(tab->descs[tab->nr_descs].btf); in bpf_free_kfunc_btf_tab()
2785 return ERR_PTR(-EINVAL); in find_kfunc_desc_btf()
2790 return btf_vmlinux ?: ERR_PTR(-ENOENT); in find_kfunc_desc_btf()
2806 prog_aux = env->prog->aux; in add_kfunc_call()
2807 tab = prog_aux->kfunc_tab; in add_kfunc_call()
2808 btf_tab = prog_aux->kfunc_btf_tab; in add_kfunc_call()
2812 return -ENOTSUPP; in add_kfunc_call()
2815 if (!env->prog->jit_requested) { in add_kfunc_call()
2817 return -ENOTSUPP; in add_kfunc_call()
2822 return -ENOTSUPP; in add_kfunc_call()
2825 if (!env->prog->gpl_compatible) { in add_kfunc_call()
2826 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); in add_kfunc_call()
2827 return -EINVAL; in add_kfunc_call()
2832 return -ENOMEM; in add_kfunc_call()
2833 prog_aux->kfunc_tab = tab; in add_kfunc_call()
2848 return -ENOMEM; in add_kfunc_call()
2849 prog_aux->kfunc_btf_tab = btf_tab; in add_kfunc_call()
2858 if (find_kfunc_desc(env->prog, func_id, offset)) in add_kfunc_call()
2861 if (tab->nr_descs == MAX_KFUNC_DESCS) { in add_kfunc_call()
2863 return -E2BIG; in add_kfunc_call()
2870 return -EINVAL; in add_kfunc_call()
2872 func_proto = btf_type_by_id(desc_btf, func->type); in add_kfunc_call()
2876 return -EINVAL; in add_kfunc_call()
2879 func_name = btf_name_by_offset(desc_btf, func->name_off); in add_kfunc_call()
2884 return -EINVAL; in add_kfunc_call()
2892 /* Check whether the relative offset overflows desc->imm */ in add_kfunc_call()
2896 return -EINVAL; in add_kfunc_call()
2901 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); in add_kfunc_call()
2906 desc = &tab->descs[tab->nr_descs++]; in add_kfunc_call()
2907 desc->func_id = func_id; in add_kfunc_call()
2908 desc->imm = call_imm; in add_kfunc_call()
2909 desc->offset = offset; in add_kfunc_call()
2910 desc->addr = addr; in add_kfunc_call()
2911 err = btf_distill_func_proto(&env->log, desc_btf, in add_kfunc_call()
2913 &desc->func_model); in add_kfunc_call()
2915 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in add_kfunc_call()
2925 if (d0->imm != d1->imm) in kfunc_desc_cmp_by_imm_off()
2926 return d0->imm < d1->imm ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
2927 if (d0->offset != d1->offset) in kfunc_desc_cmp_by_imm_off()
2928 return d0->offset < d1->offset ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
2936 tab = prog->aux->kfunc_tab; in sort_kfunc_descs_by_imm_off()
2940 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in sort_kfunc_descs_by_imm_off()
2946 return !!prog->aux->kfunc_tab; in bpf_prog_has_kfunc_call()
2954 .imm = insn->imm, in bpf_jit_find_kfunc_model()
2955 .offset = insn->off, in bpf_jit_find_kfunc_model()
2960 tab = prog->aux->kfunc_tab; in bpf_jit_find_kfunc_model()
2961 res = bsearch(&desc, tab->descs, tab->nr_descs, in bpf_jit_find_kfunc_model()
2962 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); in bpf_jit_find_kfunc_model()
2964 return res ? &res->func_model : NULL; in bpf_jit_find_kfunc_model()
2969 struct bpf_subprog_info *subprog = env->subprog_info; in add_subprog_and_kfunc()
2970 int i, ret, insn_cnt = env->prog->len, ex_cb_insn; in add_subprog_and_kfunc()
2971 struct bpf_insn *insn = env->prog->insnsi; in add_subprog_and_kfunc()
2983 if (!env->bpf_capable) { in add_subprog_and_kfunc()
2985 return -EPERM; in add_subprog_and_kfunc()
2989 ret = add_subprog(env, i + insn->imm + 1); in add_subprog_and_kfunc()
2991 ret = add_kfunc_call(env, insn->imm, insn->off); in add_subprog_and_kfunc()
3009 for (i = 1; i < env->subprog_cnt; i++) { in add_subprog_and_kfunc()
3010 if (env->subprog_info[i].start != ex_cb_insn) in add_subprog_and_kfunc()
3012 env->exception_callback_subprog = i; in add_subprog_and_kfunc()
3021 subprog[env->subprog_cnt].start = insn_cnt; in add_subprog_and_kfunc()
3023 if (env->log.level & BPF_LOG_LEVEL2) in add_subprog_and_kfunc()
3024 for (i = 0; i < env->subprog_cnt; i++) in add_subprog_and_kfunc()
3033 struct bpf_subprog_info *subprog = env->subprog_info; in check_subprogs()
3034 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
3035 int insn_cnt = env->prog->len; in check_subprogs()
3062 return -EINVAL; in check_subprogs()
3065 if (i == subprog_end - 1) { in check_subprogs()
3066 /* to avoid fall-through from one subprog into another in check_subprogs()
3074 return -EINVAL; in check_subprogs()
3078 if (cur_subprog < env->subprog_cnt) in check_subprogs()
3086 * issues like callee-saved registers, stack slot allocation time, etc.
3092 bool writes = parent == state->parent; /* Observe write marks */ in mark_reg_read()
3097 if (writes && state->live & REG_LIVE_WRITTEN) in mark_reg_read()
3099 if (parent->live & REG_LIVE_DONE) { in mark_reg_read()
3101 reg_type_str(env, parent->type), in mark_reg_read()
3102 parent->var_off.value, parent->off); in mark_reg_read()
3103 return -EFAULT; in mark_reg_read()
3108 if ((parent->live & REG_LIVE_READ) == flag || in mark_reg_read()
3109 parent->live & REG_LIVE_READ64) in mark_reg_read()
3113 * keep re-marking all parents as LIVE_READ. in mark_reg_read()
3115 * multiple times without writes into it in-between. in mark_reg_read()
3121 parent->live |= flag; in mark_reg_read()
3124 parent->live &= ~REG_LIVE_READ32; in mark_reg_read()
3126 parent = state->parent; in mark_reg_read()
3131 if (env->longest_mark_read_walk < cnt) in mark_reg_read()
3132 env->longest_mark_read_walk = cnt; in mark_reg_read()
3145 if (reg->type == CONST_PTR_TO_DYNPTR) in mark_dynptr_read()
3154 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr, in mark_dynptr_read()
3155 state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64); in mark_dynptr_read()
3158 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr, in mark_dynptr_read()
3159 state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64); in mark_dynptr_read()
3169 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr; in mark_iter_read()
3171 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); in mark_iter_read()
3175 mark_stack_slot_scratched(env, spi - i); in mark_iter_read()
3181 /* This function is supposed to be used by the following 32-bit optimization
3183 * on 64-bit, otherwise return FALSE.
3190 code = insn->code; in is_reg64()
3205 if (insn->src_reg == BPF_PSEUDO_CALL) in is_reg64()
3217 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) in is_reg64()
3221 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) in is_reg64()
3239 if (t == SRC_OP && reg->type != SCALAR_VALUE) in is_reg64()
3251 /* Both LD_IND and LD_ABS return 32-bit data. */ in is_reg64()
3271 /* Return the regno defined by the insn, or -1. */
3274 switch (BPF_CLASS(insn->code)) { in insn_def_regno()
3278 return -1; in insn_def_regno()
3280 if ((BPF_MODE(insn->code) == BPF_ATOMIC || in insn_def_regno()
3281 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) && in insn_def_regno()
3282 (insn->imm & BPF_FETCH)) { in insn_def_regno()
3283 if (insn->imm == BPF_CMPXCHG) in insn_def_regno()
3286 return insn->src_reg; in insn_def_regno()
3288 return -1; in insn_def_regno()
3291 return insn->dst_reg; in insn_def_regno()
3295 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
3300 if (dst_reg == -1) in insn_has_def32()
3309 s32 def_idx = reg->subreg_def; in mark_insn_zext()
3314 env->insn_aux_data[def_idx - 1].zext_dst = true; in mark_insn_zext()
3315 /* The dst will be zero extended, so won't be sub-register anymore. */ in mark_insn_zext()
3316 reg->subreg_def = DEF_NOT_SUBREG; in mark_insn_zext()
3322 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in __check_reg_arg()
3328 return -EINVAL; in __check_reg_arg()
3337 if (reg->type == NOT_INIT) { in __check_reg_arg()
3339 return -EACCES; in __check_reg_arg()
3341 /* We don't need to worry about FP liveness because it's read-only */ in __check_reg_arg()
3348 return mark_reg_read(env, reg, reg->parent, in __check_reg_arg()
3354 return -EACCES; in __check_reg_arg()
3356 reg->live |= REG_LIVE_WRITTEN; in __check_reg_arg()
3357 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in __check_reg_arg()
3367 struct bpf_verifier_state *vstate = env->cur_state; in check_reg_arg()
3368 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_reg_arg()
3370 return __check_reg_arg(env, state->regs, regno, t); in check_reg_arg()
3390 env->insn_aux_data[idx].jmp_point = true; in mark_jmp_point()
3395 return env->insn_aux_data[insn_idx].jmp_point; in is_jmp_point()
3402 #define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1)
3403 #define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1)
3404 #define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1)
3425 if (s->cnt < LINKED_REGS_MAX) in linked_regs_push()
3426 return &s->entries[s->cnt++]; in linked_regs_push()
3431 /* Use u64 as a vector of 6 10-bit values, use first 4-bits to track
3434 * - 3-bits frameno
3435 * - 6-bits spi_or_reg
3436 * - 1-bit is_reg
3443 for (i = 0; i < s->cnt; ++i) { in linked_regs_pack()
3444 struct linked_reg *e = &s->entries[i]; in linked_regs_pack()
3447 tmp |= e->frameno; in linked_regs_pack()
3448 tmp |= e->spi << LR_SPI_OFF; in linked_regs_pack()
3449 tmp |= (e->is_reg ? 1 : 0) << LR_IS_REG_OFF; in linked_regs_pack()
3455 val |= s->cnt; in linked_regs_pack()
3463 s->cnt = val & LR_SIZE_MASK; in linked_regs_unpack()
3466 for (i = 0; i < s->cnt; ++i) { in linked_regs_unpack()
3467 struct linked_reg *e = &s->entries[i]; in linked_regs_unpack()
3469 e->frameno = val & LR_FRAMENO_MASK; in linked_regs_unpack()
3470 e->spi = (val >> LR_SPI_OFF) & LR_SPI_MASK; in linked_regs_unpack()
3471 e->is_reg = (val >> LR_IS_REG_OFF) & 0x1; in linked_regs_unpack()
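/*
 * Worked example for linked_regs_pack()/linked_regs_unpack() above, assuming
 * the 3/6/1-bit entry layout described in the comment (LR_SPI_OFF == 3) and
 * that the count lands in the low LR_SIZE_BITS == 4 bits, as the unpack code
 * reads it: a single stack entry {frameno = 1, spi = 5, is_reg = false} packs as
 *   tmp = 1 | (5 << 3)    = 0x29
 *   val = (0x29 << 4) | 1 = 0x291   (low 4 bits carry cnt == 1)
 * and unpacking reads cnt from the low 4 bits, then the entry fields back out.
 */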
3480 u32 cnt = cur->jmp_history_cnt; in push_jmp_history()
3485 if (env->cur_hist_ent) { in push_jmp_history()
3489 WARN_ONCE((env->cur_hist_ent->flags & insn_flags) && in push_jmp_history()
3490 (env->cur_hist_ent->flags & insn_flags) != insn_flags, in push_jmp_history()
3492 env->insn_idx, env->cur_hist_ent->flags, insn_flags); in push_jmp_history()
3493 env->cur_hist_ent->flags |= insn_flags; in push_jmp_history()
3494 WARN_ONCE(env->cur_hist_ent->linked_regs != 0, in push_jmp_history()
3496 env->insn_idx, env->cur_hist_ent->linked_regs); in push_jmp_history()
3497 env->cur_hist_ent->linked_regs = linked_regs; in push_jmp_history()
3503 p = krealloc(cur->jmp_history, alloc_size, GFP_USER); in push_jmp_history()
3505 return -ENOMEM; in push_jmp_history()
3506 cur->jmp_history = p; in push_jmp_history()
3508 p = &cur->jmp_history[cnt - 1]; in push_jmp_history()
3509 p->idx = env->insn_idx; in push_jmp_history()
3510 p->prev_idx = env->prev_insn_idx; in push_jmp_history()
3511 p->flags = insn_flags; in push_jmp_history()
3512 p->linked_regs = linked_regs; in push_jmp_history()
3513 cur->jmp_history_cnt = cnt; in push_jmp_history()
3514 env->cur_hist_ent = p; in push_jmp_history()
3522 if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx) in get_jmp_hist_entry()
3523 return &st->jmp_history[hist_end - 1]; in get_jmp_hist_entry()
3529 * Return -ENOENT if we exhausted all instructions within the given state.
3532 * insn index within the same state, e.g.: 3->4->5->3, so just because current
3545 if (i == st->first_insn_idx) { in get_prev_insn_idx()
3547 return -ENOENT; in get_prev_insn_idx()
3548 if (cnt == 1 && st->jmp_history[0].idx == i) in get_prev_insn_idx()
3549 return -ENOENT; in get_prev_insn_idx()
3552 if (cnt && st->jmp_history[cnt - 1].idx == i) { in get_prev_insn_idx()
3553 i = st->jmp_history[cnt - 1].prev_idx; in get_prev_insn_idx()
3554 (*history)--; in get_prev_insn_idx()
3556 i--; in get_prev_insn_idx()
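/* For example (illustrative): if the top jmp_history entry of the state is
 * { idx = 3, prev_idx = 7 } (a jump at insn 7 landed on insn 3), stepping
 * back from i == 3 returns 7 and consumes that entry via (*history)--;
 * stepping back from i == 6, which has no matching entry, simply falls
 * through to i-- and returns 5.
 */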
3566 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) in disasm_kfunc_name()
3569 desc_btf = find_kfunc_desc_btf(data, insn->off); in disasm_kfunc_name()
3573 func = btf_type_by_id(desc_btf, insn->imm); in disasm_kfunc_name()
3574 return btf_name_by_offset(desc_btf, func->name_off); in disasm_kfunc_name()
3579 bt->frame = frame; in bt_init()
3584 struct bpf_verifier_env *env = bt->env; in bt_reset()
3587 bt->env = env; in bt_reset()
3595 for (i = 0; i <= bt->frame; i++) in bt_empty()
3596 mask |= bt->reg_masks[i] | bt->stack_masks[i]; in bt_empty()
3603 if (bt->frame == MAX_CALL_FRAMES - 1) { in bt_subprog_enter()
3604 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); in bt_subprog_enter()
3606 return -EFAULT; in bt_subprog_enter()
3608 bt->frame++; in bt_subprog_enter()
3614 if (bt->frame == 0) { in bt_subprog_exit()
3615 verbose(bt->env, "BUG subprog exit from frame 0\n"); in bt_subprog_exit()
3617 return -EFAULT; in bt_subprog_exit()
3619 bt->frame--; in bt_subprog_exit()
3625 bt->reg_masks[frame] |= 1 << reg; in bt_set_frame_reg()
3630 bt->reg_masks[frame] &= ~(1 << reg); in bt_clear_frame_reg()
3635 bt_set_frame_reg(bt, bt->frame, reg); in bt_set_reg()
3640 bt_clear_frame_reg(bt, bt->frame, reg); in bt_clear_reg()
3645 bt->stack_masks[frame] |= 1ull << slot; in bt_set_frame_slot()
3650 bt->stack_masks[frame] &= ~(1ull << slot); in bt_clear_frame_slot()
3655 return bt->reg_masks[frame]; in bt_frame_reg_mask()
3660 return bt->reg_masks[bt->frame]; in bt_reg_mask()
3665 return bt->stack_masks[frame]; in bt_frame_stack_mask()
3670 return bt->stack_masks[bt->frame]; in bt_stack_mask()
3675 return bt->reg_masks[bt->frame] & (1 << reg); in bt_is_reg_set()
3680 return bt->reg_masks[frame] & (1 << reg); in bt_is_frame_reg_set()
3685 return bt->stack_masks[frame] & (1ull << slot); in bt_is_frame_slot_set()
3702 buf_sz -= n; in fmt_reg_mask()
3707 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
3718 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); in fmt_stack_mask()
3721 buf_sz -= n; in fmt_stack_mask()
3727 /* If any register R in hist->linked_regs is marked as precise in bt,
3728 * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
3736 if (!hist || hist->linked_regs == 0) in bt_sync_linked_regs()
3739 linked_regs_unpack(hist->linked_regs, &linked_regs); in bt_sync_linked_regs()
3743 if ((e->is_reg && bt_is_frame_reg_set(bt, e->frameno, e->regno)) || in bt_sync_linked_regs()
3744 (!e->is_reg && bt_is_frame_slot_set(bt, e->frameno, e->spi))) { in bt_sync_linked_regs()
3756 if (e->is_reg) in bt_sync_linked_regs()
3757 bt_set_frame_reg(bt, e->frameno, e->regno); in bt_sync_linked_regs()
3759 bt_set_frame_slot(bt, e->frameno, e->spi); in bt_sync_linked_regs()
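/* For example (illustrative): suppose hist->linked_regs records that r6
 * and stack slot spi 2, both in frame 0, hold copies of the same scalar.
 * If backtracking has already marked r6 as needing precision
 * (bt_is_frame_reg_set(bt, 0, 6)), then bt_set_frame_slot(bt, 0, 2) is
 * applied as well, so the precision request follows the value to every
 * linked location.
 */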
3771 * - *would be* executed next, if jump history is viewed in forward order;
3772 * - *was* processed previously during backtracking.
3782 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
3783 u8 class = BPF_CLASS(insn->code); in backtrack_insn()
3784 u8 opcode = BPF_OP(insn->code); in backtrack_insn()
3785 u8 mode = BPF_MODE(insn->code); in backtrack_insn()
3786 u32 dreg = insn->dst_reg; in backtrack_insn()
3787 u32 sreg = insn->src_reg; in backtrack_insn()
3790 if (insn->code == 0) in backtrack_insn()
3792 if (env->log.level & BPF_LOG_LEVEL2) { in backtrack_insn()
3793 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); in backtrack_insn()
3795 bt->frame, env->tmp_str_buf); in backtrack_insn()
3796 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); in backtrack_insn()
3797 verbose(env, "stack=%s before ", env->tmp_str_buf); in backtrack_insn()
3799 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in backtrack_insn()
3817 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
3835 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
3857 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
3859 /* dreg = *(u64 *)[fp - off] was a fill from the stack. in backtrack_insn()
3860 * that [fp - off] slot contains scalar that needs to be in backtrack_insn()
3863 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
3864 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
3872 return -ENOTSUPP; in backtrack_insn()
3874 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
3876 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
3877 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
3887 subprog_insn_idx = idx + insn->imm + 1; in backtrack_insn()
3890 return -EFAULT; in backtrack_insn()
3900 /* r1-r5 are invalidated after subprog call, in backtrack_insn()
3907 return -EFAULT; in backtrack_insn()
3915 * so only r1-r5 could still be requested as in backtrack_insn()
3916 * precise, r0 and r6-r10 or any stack slot in in backtrack_insn()
3922 return -EFAULT; in backtrack_insn()
3930 return -EFAULT; in backtrack_insn()
3932 /* propagate r1-r5 to the caller */ in backtrack_insn()
3936 bt_set_frame_reg(bt, bt->frame - 1, i); in backtrack_insn()
3940 return -EFAULT; in backtrack_insn()
3943 } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { in backtrack_insn()
3944 /* exit from callback subprog to callback-calling helper or in backtrack_insn()
3948 * propagate precision of r1-r5 (if any requested), as they are in backtrack_insn()
3954 return -EFAULT; in backtrack_insn()
3959 return -EFAULT; in backtrack_insn()
3961 /* clear r1-r5 in callback subprog's mask */ in backtrack_insn()
3965 return -EFAULT; in backtrack_insn()
3972 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) in backtrack_insn()
3973 return -ENOTSUPP; in backtrack_insn()
3977 /* if backtracing was looking for registers R1-R5 in backtrack_insn()
3982 return -EFAULT; in backtrack_insn()
3990 * precision to registers R1-R5 should have been found already. in backtrack_insn()
3991 * In case of a callback, it is ok to have R1-R5 marked for in backtrack_insn()
4001 return -EFAULT; in backtrack_insn()
4006 * whether the instruction at subseq_idx-1 is subprog in backtrack_insn()
4012 r0_precise = subseq_idx - 1 >= 0 && in backtrack_insn()
4013 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && in backtrack_insn()
4018 return -EFAULT; in backtrack_insn()
4022 /* r6-r9 and stack slots will stay set in caller frame in backtrack_insn()
4026 } else if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
4037 } else if (BPF_SRC(insn->code) == BPF_K) { in backtrack_insn()
4040 * this insn, so for the K-based conditional in backtrack_insn()
4054 return -ENOTSUPP; in backtrack_insn()
4091 * r9 -= r8
4122 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4124 st->curframe); in mark_all_scalars_precise()
4130 * because precision markings in current non-checkpointed state are in mark_all_scalars_precise()
4133 for (st = st->parent; st; st = st->parent) { in mark_all_scalars_precise()
4134 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_precise()
4135 func = st->frame[i]; in mark_all_scalars_precise()
4137 reg = &func->regs[j]; in mark_all_scalars_precise()
4138 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
4140 reg->precise = true; in mark_all_scalars_precise()
4141 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4146 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
4147 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_precise()
4149 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_precise()
4150 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
4152 reg->precise = true; in mark_all_scalars_precise()
4153 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4155 i, -(j + 1) * 8); in mark_all_scalars_precise()
4168 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_imprecise()
4169 func = st->frame[i]; in mark_all_scalars_imprecise()
4171 reg = &func->regs[j]; in mark_all_scalars_imprecise()
4172 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
4174 reg->precise = false; in mark_all_scalars_imprecise()
4176 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
4177 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_imprecise()
4179 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_imprecise()
4180 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
4182 reg->precise = false; in mark_all_scalars_imprecise()
4204 * i.e., it is not yet put into env->explored_states, and it has no children
4207 * reached or b) checkpointed and put into env->explored_states, branching out
4276 struct backtrack_state *bt = &env->bt; in __mark_chain_precision()
4277 struct bpf_verifier_state *st = env->cur_state; in __mark_chain_precision()
4278 int first_idx = st->first_insn_idx; in __mark_chain_precision()
4279 int last_idx = env->insn_idx; in __mark_chain_precision()
4280 int subseq_idx = -1; in __mark_chain_precision()
4286 if (!env->bpf_capable) in __mark_chain_precision()
4290 bt_init(bt, env->cur_state->curframe); in __mark_chain_precision()
4296 func = st->frame[bt->frame]; in __mark_chain_precision()
4298 reg = &func->regs[regno]; in __mark_chain_precision()
4299 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4301 return -EFAULT; in __mark_chain_precision()
4311 u32 history = st->jmp_history_cnt; in __mark_chain_precision()
4314 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4316 bt->frame, last_idx, first_idx, subseq_idx); in __mark_chain_precision()
4322 * requested precise registers are R1-R5 in __mark_chain_precision()
4325 if (st->curframe == 0 && in __mark_chain_precision()
4326 st->frame[0]->subprogno > 0 && in __mark_chain_precision()
4327 st->frame[0]->callsite == BPF_MAIN_FUNC && in __mark_chain_precision()
4332 reg = &st->frame[0]->regs[i]; in __mark_chain_precision()
4334 if (reg->type == SCALAR_VALUE) in __mark_chain_precision()
4335 reg->precise = true; in __mark_chain_precision()
4341 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); in __mark_chain_precision()
4343 return -EFAULT; in __mark_chain_precision()
4354 if (err == -ENOTSUPP) { in __mark_chain_precision()
4355 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4369 if (i == -ENOENT) in __mark_chain_precision()
4371 if (i >= env->prog->len) { in __mark_chain_precision()
4380 return -EFAULT; in __mark_chain_precision()
4383 st = st->parent; in __mark_chain_precision()
4387 for (fr = bt->frame; fr >= 0; fr--) { in __mark_chain_precision()
4388 func = st->frame[fr]; in __mark_chain_precision()
4391 reg = &func->regs[i]; in __mark_chain_precision()
4392 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4396 if (reg->precise) in __mark_chain_precision()
4399 reg->precise = true; in __mark_chain_precision()
4404 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
4406 i, func->allocated_stack / BPF_REG_SIZE); in __mark_chain_precision()
4408 return -EFAULT; in __mark_chain_precision()
4411 if (!is_spilled_scalar_reg(&func->stack[i])) { in __mark_chain_precision()
4415 reg = &func->stack[i].spilled_ptr; in __mark_chain_precision()
4416 if (reg->precise) in __mark_chain_precision()
4419 reg->precise = true; in __mark_chain_precision()
4421 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4422 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4425 fr, env->tmp_str_buf); in __mark_chain_precision()
4426 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4428 verbose(env, "stack=%s: ", env->tmp_str_buf); in __mark_chain_precision()
4437 last_idx = st->last_insn_idx; in __mark_chain_precision()
4438 first_idx = st->first_insn_idx; in __mark_chain_precision()
4442 * something (e.g., stack access through non-r10 register), so in __mark_chain_precision()
4446 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4458 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to
4463 return __mark_chain_precision(env, -1); in mark_chain_precision_batch()
4496 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); in register_is_null()
4502 return reg->type == SCALAR_VALUE && in is_reg_const()
4503 tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off); in is_reg_const()
4509 return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value; in reg_const_value()
4518 return reg->type != SCALAR_VALUE; in __is_pointer_value()
4524 if (src_reg->type != SCALAR_VALUE) in assign_scalar_id_before_mov()
4527 if (src_reg->id & BPF_ADD_CONST) { in assign_scalar_id_before_mov()
4530 * rY->id has special linked register already. in assign_scalar_id_before_mov()
4533 src_reg->id = 0; in assign_scalar_id_before_mov()
4534 src_reg->off = 0; in assign_scalar_id_before_mov()
4537 if (!src_reg->id && !tnum_is_const(src_reg->var_off)) in assign_scalar_id_before_mov()
4542 src_reg->id = ++env->id_gen; in assign_scalar_id_before_mov()
4545 /* Copy src state preserving dst->parent and dst->live fields */
4548 struct bpf_reg_state *parent = dst->parent; in copy_register_state()
4549 enum bpf_reg_liveness live = dst->live; in copy_register_state()
4552 dst->parent = parent; in copy_register_state()
4553 dst->live = live; in copy_register_state()
4563 copy_register_state(&state->stack[spi].spilled_ptr, reg); in save_register_state()
4565 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in save_register_state()
4567 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
4568 state->stack[spi].slot_type[i - 1] = STACK_SPILL; in save_register_state()
4571 for (; i; i--) in save_register_state()
4572 mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]); in save_register_state()
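/* For example (illustrative): an 8-byte spill marks all eight
 * slot_type[] bytes of the slot as STACK_SPILL; a 4-byte spill marks
 * slot_type[7..4] as STACK_SPILL and hands slot_type[3..0] to
 * mark_stack_slot_misc(), since those bytes are not covered by the
 * spilled register.
 */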
4577 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; in is_bpf_st_mem()
4582 return fls64(reg->umax_value); in get_reg_width()
4589 struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno]; in check_fastcall_stack_contract()
4590 struct bpf_insn_aux_data *aux = env->insn_aux_data; in check_fastcall_stack_contract()
4593 if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern) in check_fastcall_stack_contract()
4600 subprog->fastcall_stack_off = S16_MIN; in check_fastcall_stack_contract()
4604 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in check_fastcall_stack_contract()
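/* A note on the slot/spi arithmetic used below (illustrative): a fixed
 * stack offset maps to a byte slot via slot = -off - 1 and to an 8-byte
 * spill slot index via spi = slot / BPF_REG_SIZE. So off = -8 gives
 * slot = 7, spi = 0 (the topmost slot); off = -16 gives slot = 15,
 * spi = 1; and off = -12 gives slot = 11, spi = 1, with
 * slot % BPF_REG_SIZE == 3 selecting the byte within that slot.
 */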
4620 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
4621 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off()
4623 int insn_flags = insn_stack_access_flags(state->frameno, spi); in check_stack_write_fixed_off()
4625 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, in check_stack_write_fixed_off()
4628 if (!env->allow_ptr_leaks && in check_stack_write_fixed_off()
4629 is_spilled_reg(&state->stack[spi]) && in check_stack_write_fixed_off()
4632 return -EACCES; in check_stack_write_fixed_off()
4635 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_fixed_off()
4637 reg = &cur->regs[value_regno]; in check_stack_write_fixed_off()
4638 if (!env->bypass_spec_v4) { in check_stack_write_fixed_off()
4639 bool sanitize = reg && is_spillable_regtype(reg->type); in check_stack_write_fixed_off()
4642 u8 type = state->stack[spi].slot_type[i]; in check_stack_write_fixed_off()
4651 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; in check_stack_write_fixed_off()
4660 if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) { in check_stack_write_fixed_off()
4670 state->stack[spi].spilled_ptr.id = 0; in check_stack_write_fixed_off()
4672 env->bpf_capable) { in check_stack_write_fixed_off()
4673 struct bpf_reg_state *tmp_reg = &env->fake_reg[0]; in check_stack_write_fixed_off()
4676 __mark_reg_known(tmp_reg, insn->imm); in check_stack_write_fixed_off()
4677 tmp_reg->type = SCALAR_VALUE; in check_stack_write_fixed_off()
4679 } else if (reg && is_spillable_regtype(reg->type)) { in check_stack_write_fixed_off()
4684 return -EACCES; in check_stack_write_fixed_off()
4686 if (state != cur && reg->type == PTR_TO_STACK) { in check_stack_write_fixed_off()
4688 return -EINVAL; in check_stack_write_fixed_off()
4695 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_fixed_off()
4697 if (is_stack_slot_special(&state->stack[spi])) in check_stack_write_fixed_off()
4699 scrub_spilled_slot(&state->stack[spi].slot_type[i]); in check_stack_write_fixed_off()
4710 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in check_stack_write_fixed_off()
4714 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { in check_stack_write_fixed_off()
4729 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; in check_stack_write_fixed_off()
4734 return push_jmp_history(env, env->cur_state, insn_flags, 0); in check_stack_write_fixed_off()
4744 * 'off' includes 'regno->off'.
4745 * 'value_regno' can be -1, meaning that an unknown value is being written to
4767 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_var_off()
4774 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_var_off()
4775 ptr_reg = &cur->regs[ptr_regno]; in check_stack_write_var_off()
4776 min_off = ptr_reg->smin_value + off; in check_stack_write_var_off()
4777 max_off = ptr_reg->smax_value + off + size; in check_stack_write_var_off()
4779 value_reg = &cur->regs[value_regno]; in check_stack_write_var_off()
4781 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) in check_stack_write_var_off()
4799 slot = -i - 1; in check_stack_write_var_off()
4801 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
4804 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { in check_stack_write_var_off()
4816 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", in check_stack_write_var_off()
4818 return -EINVAL; in check_stack_write_var_off()
4825 is_spilled_scalar_reg(&state->stack[spi])) { in check_stack_write_var_off()
4826 struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr; in check_stack_write_var_off()
4828 if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) { in check_stack_write_var_off()
4835 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_var_off()
4851 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { in check_stack_write_var_off()
4852 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", in check_stack_write_var_off()
4854 return -EINVAL; in check_stack_write_var_off()
4880 struct bpf_verifier_state *vstate = env->cur_state; in mark_reg_stack_read()
4881 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_reg_stack_read()
4887 slot = -i - 1; in mark_reg_stack_read()
4890 stype = ptr_state->stack[spi].slot_type; in mark_reg_stack_read()
4895 if (zeros == max_off - min_off) { in mark_reg_stack_read()
4899 __mark_reg_const_zero(env, &state->regs[dst_regno]); in mark_reg_stack_read()
4902 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
4904 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in mark_reg_stack_read()
4911 * 'dst_regno' can be -1, meaning that the read value is not going to a
4921 struct bpf_verifier_state *vstate = env->cur_state; in check_stack_read_fixed_off()
4922 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_stack_read_fixed_off()
4923 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
4926 int insn_flags = insn_stack_access_flags(reg_state->frameno, spi); in check_stack_read_fixed_off()
4928 stype = reg_state->stack[spi].slot_type; in check_stack_read_fixed_off()
4929 reg = &reg_state->stack[spi].spilled_ptr; in check_stack_read_fixed_off()
4932 check_fastcall_stack_contract(env, state, env->insn_idx, off); in check_stack_read_fixed_off()
4934 if (is_spilled_reg(&reg_state->stack[spi])) { in check_stack_read_fixed_off()
4937 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
4941 if (reg->type != SCALAR_VALUE) { in check_stack_read_fixed_off()
4942 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read_fixed_off()
4944 return -EACCES; in check_stack_read_fixed_off()
4947 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
4956 s32 subreg_def = state->regs[dst_regno].subreg_def; in check_stack_read_fixed_off()
4958 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
4959 state->regs[dst_regno].subreg_def = subreg_def; in check_stack_read_fixed_off()
4965 state->regs[dst_regno].id = 0; in check_stack_read_fixed_off()
4970 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
4981 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
4985 return -EACCES; in check_stack_read_fixed_off()
4989 tnum_is_const(reg->var_off) && reg->var_off.value == 0) { in check_stack_read_fixed_off()
4990 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
4994 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
4997 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
5001 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
5004 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
5009 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
5010 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { in check_stack_read_fixed_off()
5011 /* If dst_regno==-1, the caller is asking us whether in check_stack_read_fixed_off()
5019 return -EACCES; in check_stack_read_fixed_off()
5021 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
5024 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5029 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
5033 return -EACCES; in check_stack_read_fixed_off()
5035 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
5041 return push_jmp_history(env, env->cur_state, insn_flags, 0); in check_stack_read_fixed_off()
5090 min_off = reg->smin_value + off; in check_stack_read_var_off()
5091 max_off = reg->smax_value + off; in check_stack_read_var_off()
5093 check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off); in check_stack_read_var_off()
5104 * can be -1, meaning that the read value is not going to a register.
5114 bool var_off = !tnum_is_const(reg->var_off); in check_stack_read()
5123 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_read()
5126 return -EACCES; in check_stack_read()
5138 off += reg->var_off.value; in check_stack_read()
5157 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
5159 * be -1, meaning that we're not writing from a register.
5171 if (tnum_is_const(reg->var_off)) { in check_stack_write()
5172 off += reg->var_off.value; in check_stack_write()
5195 map->value_size, off, size); in check_map_access_type()
5196 return -EACCES; in check_map_access_type()
5201 map->value_size, off, size); in check_map_access_type()
5202 return -EACCES; in check_map_access_type()
5220 switch (reg->type) { in __check_mem_access()
5233 off, size, regno, reg->id, off, mem_size); in __check_mem_access()
5241 return -EACCES; in __check_mem_access()
5249 struct bpf_verifier_state *vstate = env->cur_state; in check_mem_region_access()
5250 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_mem_region_access()
5251 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access()
5264 if (reg->smin_value < 0 && in check_mem_region_access()
5265 (reg->smin_value == S64_MIN || in check_mem_region_access()
5266 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || in check_mem_region_access()
5267 reg->smin_value + off < 0)) { in check_mem_region_access()
5270 return -EACCES; in check_mem_region_access()
5272 err = __check_mem_access(env, regno, reg->smin_value + off, size, in check_mem_region_access()
5282 * If reg->umax_value + off could overflow, treat that as unbounded too. in check_mem_region_access()
5284 if (reg->umax_value >= BPF_MAX_VAR_OFF) { in check_mem_region_access()
5287 return -EACCES; in check_mem_region_access()
5289 err = __check_mem_access(env, regno, reg->umax_value + off, size, in check_mem_region_access()
5304 /* Access to this pointer-typed register or passing it to a helper in __check_ptr_off_reg()
5308 if (reg->off < 0) { in __check_ptr_off_reg()
5310 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5311 return -EACCES; in __check_ptr_off_reg()
5314 if (!fixed_off_ok && reg->off) { in __check_ptr_off_reg()
5316 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5317 return -EACCES; in __check_ptr_off_reg()
5320 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_ptr_off_reg()
5323 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_ptr_off_reg()
5325 reg_type_str(env, reg->type), tn_buf); in __check_ptr_off_reg()
5326 return -EACCES; in __check_ptr_off_reg()
5342 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); in map_kptr_match_type()
5346 if (btf_is_kernel(reg->btf)) { in map_kptr_match_type()
5350 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5354 if (kptr_field->type == BPF_KPTR_PERCPU) in map_kptr_match_type()
5358 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) in map_kptr_match_type()
5361 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ in map_kptr_match_type()
5362 reg_name = btf_type_name(reg->btf, reg->btf_id); in map_kptr_match_type()
5368 * reg->off and reg->ref_obj_id are not needed here. in map_kptr_match_type()
5371 return -EACCES; in map_kptr_match_type()
5374 * we also need to take into account the reg->off. in map_kptr_match_type()
5385 * val->foo = v; // reg->off is zero, btf and btf_id match type in map_kptr_match_type()
5386 * val->bar = &v->br; // reg->off is still zero, but we need to retry with in map_kptr_match_type()
5388 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked in map_kptr_match_type()
5391 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off in map_kptr_match_type()
5397 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in map_kptr_match_type()
5398 kptr_field->kptr.btf, kptr_field->kptr.btf_id, in map_kptr_match_type()
5399 kptr_field->type != BPF_KPTR_UNREF)) in map_kptr_match_type()
5404 reg_type_str(env, reg->type), reg_name); in map_kptr_match_type()
5406 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5411 return -EINVAL; in map_kptr_match_type()
5416 return env->prog->sleepable || in in_sleepable()
5417 (env->cur_state && env->cur_state->in_sleepable); in in_sleepable()
5420 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
5425 return env->cur_state->active_rcu_lock || in in_rcu_cs()
5426 env->cur_state->active_lock.ptr || in in_rcu_cs()
5454 if (btf_is_kernel(kptr_field->kptr.btf)) in kptr_pointee_btf_record()
5457 meta = btf_find_struct_meta(kptr_field->kptr.btf, in kptr_pointee_btf_record()
5458 kptr_field->kptr.btf_id); in kptr_pointee_btf_record()
5460 return meta ? meta->record : NULL; in kptr_pointee_btf_record()
5465 const struct btf_field_kptr *kptr = &field->kptr; in rcu_safe_kptr()
5467 return field->type == BPF_KPTR_PERCPU || in rcu_safe_kptr()
5468 (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id)); in rcu_safe_kptr()
5479 if (kptr_field->type == BPF_KPTR_PERCPU) in btf_ld_kptr_type()
5481 else if (!btf_is_kernel(kptr_field->kptr.btf)) in btf_ld_kptr_type()
5498 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access()
5499 int class = BPF_CLASS(insn->code); in check_map_kptr_access()
5503 * - Reject cases where variable offset may touch kptr in check_map_kptr_access()
5504 * - size of access (must be BPF_DW) in check_map_kptr_access()
5505 * - tnum_is_const(reg->var_off) in check_map_kptr_access()
5506 * - kptr_field->offset == off + reg->var_off.value in check_map_kptr_access()
5509 if (BPF_MODE(insn->code) != BPF_MEM) { in check_map_kptr_access()
5511 return -EACCES; in check_map_kptr_access()
5518 (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) { in check_map_kptr_access()
5520 return -EACCES; in check_map_kptr_access()
5528 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, in check_map_kptr_access()
5529 kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field)); in check_map_kptr_access()
5534 return -EACCES; in check_map_kptr_access()
5536 if (insn->imm) { in check_map_kptr_access()
5538 kptr_field->offset); in check_map_kptr_access()
5539 return -EACCES; in check_map_kptr_access()
5543 return -EACCES; in check_map_kptr_access()
5553 struct bpf_verifier_state *vstate = env->cur_state; in check_map_access()
5554 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_map_access()
5555 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
5556 struct bpf_map *map = reg->map_ptr; in check_map_access()
5560 err = check_mem_region_access(env, regno, off, size, map->value_size, in check_map_access()
5565 if (IS_ERR_OR_NULL(map->record)) in check_map_access()
5567 rec = map->record; in check_map_access()
5568 for (i = 0; i < rec->cnt; i++) { in check_map_access()
5569 struct btf_field *field = &rec->fields[i]; in check_map_access()
5570 u32 p = field->offset; in check_map_access()
5576 if (reg->smin_value + off < p + field->size && in check_map_access()
5577 p < reg->umax_value + off + size) { in check_map_access()
5578 switch (field->type) { in check_map_access()
5584 return -EACCES; in check_map_access()
5586 if (!tnum_is_const(reg->var_off)) { in check_map_access()
5588 return -EACCES; in check_map_access()
5590 if (p != off + reg->var_off.value) { in check_map_access()
5592 p, off + reg->var_off.value); in check_map_access()
5593 return -EACCES; in check_map_access()
5597 return -EACCES; in check_map_access()
5602 btf_field_type_name(field->type)); in check_map_access()
5603 return -EACCES; in check_map_access()
5616 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in may_access_direct_pkt_data()
5638 return meta->pkt_access; in may_access_direct_pkt_data()
5640 env->seen_direct_write = true; in may_access_direct_pkt_data()
5645 env->seen_direct_write = true; in may_access_direct_pkt_data()
5662 * reg->range we have comes after that. We are only checking the fixed in check_packet_access()
5669 if (reg->smin_value < 0) { in check_packet_access()
5672 return -EACCES; in check_packet_access()
5675 err = reg->range < 0 ? -EINVAL : in check_packet_access()
5676 __check_mem_access(env, regno, off, size, reg->range, in check_packet_access()
5683 /* __check_mem_access has made sure "off + size - 1" is within u16. in check_packet_access()
5684 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, in check_packet_access()
5687 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. in check_packet_access()
5689 env->prog->aux->max_pkt_offset = in check_packet_access()
5690 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
5691 off + reg->umax_value + size - 1); in check_packet_access()
5703 .log = &env->log, in check_ctx_access()
5708 if (env->ops->is_valid_access && in check_ctx_access()
5709 env->ops->is_valid_access(off, size, t, env->prog, &info)) { in check_ctx_access()
5724 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; in check_ctx_access()
5727 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
5728 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
5733 return -EACCES; in check_ctx_access()
5743 return -EACCES; in check_flow_keys_access()
5757 if (reg->smin_value < 0) { in check_sock_access()
5760 return -EACCES; in check_sock_access()
5763 switch (reg->type) { in check_sock_access()
5782 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
5788 regno, reg_type_str(env, reg->type), off, size); in check_sock_access()
5790 return -EACCES; in check_sock_access()
5795 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); in is_pointer_value()
5802 return reg->type == PTR_TO_CTX; in is_ctx_reg()
5809 return type_is_sk_pointer(reg->type); in is_sk_reg()
5816 return type_is_pkt_pointer(reg->type); in is_pkt_reg()
5824 return reg->type == PTR_TO_FLOW_KEYS; in is_flow_key_reg()
5831 return reg->type == PTR_TO_ARENA; in is_arena_reg()
5846 if (reg->ref_obj_id) in is_trusted_reg()
5850 if (reg2btf_ids[base_type(reg->type)] && in is_trusted_reg()
5851 !bpf_type_has_unsafe_modifiers(reg->type)) in is_trusted_reg()
5856 * other type modifiers may be safe, but we elect to take an opt-in in is_trusted_reg()
5863 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && in is_trusted_reg()
5864 !bpf_type_has_unsafe_modifiers(reg->type); in is_trusted_reg()
5869 return reg->type & MEM_RCU; in is_rcu_reg()
5898 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); in check_pkt_ptr_alignment()
5902 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_pkt_ptr_alignment()
5905 ip_align, tn_buf, reg->off, off, size); in check_pkt_ptr_alignment()
5906 return -EACCES; in check_pkt_ptr_alignment()
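/* For example (a sketch of the strict-alignment rule): with
 * NET_IP_ALIGN == 2 and a 4-byte load at constant packet offset 0,
 * reg_off evaluates to the constant 2. Since 2 is not a multiple of the
 * access size 4, the load is rejected with the message above.
 */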
5923 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); in check_generic_ptr_alignment()
5927 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_generic_ptr_alignment()
5929 pointer_desc, tn_buf, reg->off, off, size); in check_generic_ptr_alignment()
5930 return -EACCES; in check_generic_ptr_alignment()
5940 bool strict = env->strict_alignment || strict_alignment_once; in check_ptr_alignment()
5943 switch (reg->type) { in check_ptr_alignment()
5993 if (env->prog->jit_requested) in round_up_stack_depth()
5996 /* round up to 32 bytes, since this is the granularity in round_up_stack_depth()
6010 struct bpf_subprog_info *subprog = env->subprog_info; in check_max_stack_depth_subprog()
6011 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth_subprog()
6027 * func1 -> sub rsp, 128 in check_max_stack_depth_subprog()
6028 * subfunc1 -> sub rsp, 256 in check_max_stack_depth_subprog()
6029 * tailcall1 -> add rsp, 256 in check_max_stack_depth_subprog()
6030 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) in check_max_stack_depth_subprog()
6031 * subfunc2 -> sub rsp, 64 in check_max_stack_depth_subprog()
6032 * subfunc22 -> sub rsp, 128 in check_max_stack_depth_subprog()
6033 * tailcall2 -> add rsp, 128 in check_max_stack_depth_subprog()
6034 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) in check_max_stack_depth_subprog()
6043 return -EACCES; in check_max_stack_depth_subprog()
6049 return -EACCES; in check_max_stack_depth_subprog()
6074 return -EINVAL; in check_max_stack_depth_subprog()
6089 return -EFAULT; in check_max_stack_depth_subprog()
6094 return -EFAULT; in check_max_stack_depth_subprog()
6101 return -EINVAL; in check_max_stack_depth_subprog()
6114 return -E2BIG; in check_max_stack_depth_subprog()
6127 return -EINVAL; in check_max_stack_depth_subprog()
6132 env->prog->aux->tail_call_reachable = true; in check_max_stack_depth_subprog()
6139 depth -= round_up_stack_depth(env, subprog[idx].stack_depth); in check_max_stack_depth_subprog()
6140 frame--; in check_max_stack_depth_subprog()
6148 struct bpf_subprog_info *si = env->subprog_info; in check_max_stack_depth()
6151 for (int i = 0; i < env->subprog_cnt; i++) { in check_max_stack_depth()
6166 int start = idx + insn->imm + 1, subprog; in get_callee_stack_depth()
6172 return -EFAULT; in get_callee_stack_depth()
6174 return env->subprog_info[subprog].stack_depth; in get_callee_stack_depth()
6187 return -EACCES; in __check_buffer_access()
6189 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_buffer_access()
6192 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_buffer_access()
6196 return -EACCES; in __check_buffer_access()
6212 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
6213 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
6224 const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr"; in check_buffer_access()
6237 /* BPF architecture zero extends alu32 ops into 64-bit registers */
6240 reg->var_off = tnum_subreg(reg->var_off); in zext_32_to_64()
6252 reg->var_off = tnum_cast(reg->var_off, size); in coerce_reg_to_size()
6255 mask = ((u64)1 << (size * 8)) - 1; in coerce_reg_to_size()
6256 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { in coerce_reg_to_size()
6257 reg->umin_value &= mask; in coerce_reg_to_size()
6258 reg->umax_value &= mask; in coerce_reg_to_size()
6260 reg->umin_value = 0; in coerce_reg_to_size()
6261 reg->umax_value = mask; in coerce_reg_to_size()
6263 reg->smin_value = reg->umin_value; in coerce_reg_to_size()
6264 reg->smax_value = reg->umax_value; in coerce_reg_to_size()
6267 * values are also truncated so we push 64-bit bounds into in coerce_reg_to_size()
6268 * 32-bit bounds. Above were truncated < 32-bits already. in coerce_reg_to_size()
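/* For example (illustrative): truncating to size == 1 uses mask == 0xff.
 * A register with bounds [0, 100] keeps [0, 100] because the bits above
 * the mask agree for umin and umax; a register with bounds [0, 300]
 * collapses to [0, 255] because bit 8 differs between the two bounds,
 * and smin/smax are then set from the new unsigned bounds.
 */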
6279 reg->smin_value = reg->s32_min_value = S8_MIN; in set_sext64_default_val()
6280 reg->smax_value = reg->s32_max_value = S8_MAX; in set_sext64_default_val()
6282 reg->smin_value = reg->s32_min_value = S16_MIN; in set_sext64_default_val()
6283 reg->smax_value = reg->s32_max_value = S16_MAX; in set_sext64_default_val()
6286 reg->smin_value = reg->s32_min_value = S32_MIN; in set_sext64_default_val()
6287 reg->smax_value = reg->s32_max_value = S32_MAX; in set_sext64_default_val()
6289 reg->umin_value = reg->u32_min_value = 0; in set_sext64_default_val()
6290 reg->umax_value = U64_MAX; in set_sext64_default_val()
6291 reg->u32_max_value = U32_MAX; in set_sext64_default_val()
6292 reg->var_off = tnum_unknown; in set_sext64_default_val()
6301 if (tnum_is_const(reg->var_off)) { in coerce_reg_to_size_sx()
6302 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6304 reg->var_off = tnum_const((s8)u64_cval); in coerce_reg_to_size_sx()
6306 reg->var_off = tnum_const((s16)u64_cval); in coerce_reg_to_size_sx()
6309 reg->var_off = tnum_const((s32)u64_cval); in coerce_reg_to_size_sx()
6311 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6312 reg->smax_value = reg->smin_value = u64_cval; in coerce_reg_to_size_sx()
6313 reg->umax_value = reg->umin_value = u64_cval; in coerce_reg_to_size_sx()
6314 reg->s32_max_value = reg->s32_min_value = u64_cval; in coerce_reg_to_size_sx()
6315 reg->u32_max_value = reg->u32_min_value = u64_cval; in coerce_reg_to_size_sx()
6319 top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6320 top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6327 init_s64_max = (s8)reg->smax_value; in coerce_reg_to_size_sx()
6328 init_s64_min = (s8)reg->smin_value; in coerce_reg_to_size_sx()
6330 init_s64_max = (s16)reg->smax_value; in coerce_reg_to_size_sx()
6331 init_s64_min = (s16)reg->smin_value; in coerce_reg_to_size_sx()
6333 init_s64_max = (s32)reg->smax_value; in coerce_reg_to_size_sx()
6334 init_s64_min = (s32)reg->smin_value; in coerce_reg_to_size_sx()
6342 reg->s32_min_value = reg->smin_value = s64_min; in coerce_reg_to_size_sx()
6343 reg->s32_max_value = reg->smax_value = s64_max; in coerce_reg_to_size_sx()
6344 reg->u32_min_value = reg->umin_value = s64_min; in coerce_reg_to_size_sx()
6345 reg->u32_max_value = reg->umax_value = s64_max; in coerce_reg_to_size_sx()
6346 reg->var_off = tnum_range(s64_min, s64_max); in coerce_reg_to_size_sx()
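/* For example (illustrative): sign-extending the known constant 0x80
 * with size == 1 takes the tnum_is_const() path: (s8)0x80 == -128, so
 * var_off becomes the constant 0xffffffffffffff80 and all signed and
 * unsigned bounds collapse to that single value. When the pre-cast
 * bounds cannot be preserved, set_sext64_default_val() instead widens
 * the register to the full sign-extended range (e.g. [S8_MIN, S8_MAX]).
 */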
6357 reg->s32_min_value = S8_MIN; in set_sext32_default_val()
6358 reg->s32_max_value = S8_MAX; in set_sext32_default_val()
6361 reg->s32_min_value = S16_MIN; in set_sext32_default_val()
6362 reg->s32_max_value = S16_MAX; in set_sext32_default_val()
6364 reg->u32_min_value = 0; in set_sext32_default_val()
6365 reg->u32_max_value = U32_MAX; in set_sext32_default_val()
6366 reg->var_off = tnum_subreg(tnum_unknown); in set_sext32_default_val()
6375 if (tnum_is_const(reg->var_off)) { in coerce_subreg_to_size_sx()
6376 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6378 reg->var_off = tnum_const((s8)u32_val); in coerce_subreg_to_size_sx()
6380 reg->var_off = tnum_const((s16)u32_val); in coerce_subreg_to_size_sx()
6382 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6383 reg->s32_min_value = reg->s32_max_value = u32_val; in coerce_subreg_to_size_sx()
6384 reg->u32_min_value = reg->u32_max_value = u32_val; in coerce_subreg_to_size_sx()
6388 top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6389 top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6396 init_s32_max = (s8)reg->s32_max_value; in coerce_subreg_to_size_sx()
6397 init_s32_min = (s8)reg->s32_min_value; in coerce_subreg_to_size_sx()
6400 init_s32_max = (s16)reg->s32_max_value; in coerce_subreg_to_size_sx()
6401 init_s32_min = (s16)reg->s32_min_value; in coerce_subreg_to_size_sx()
6407 reg->s32_min_value = s32_min; in coerce_subreg_to_size_sx()
6408 reg->s32_max_value = s32_max; in coerce_subreg_to_size_sx()
6409 reg->u32_min_value = (u32)s32_min; in coerce_subreg_to_size_sx()
6410 reg->u32_max_value = (u32)s32_max; in coerce_subreg_to_size_sx()
6411 reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max)); in coerce_subreg_to_size_sx()
6421 /* A map is considered read-only if the following conditions are true: in bpf_map_is_rdonly()
6434 return (map->map_flags & BPF_F_RDONLY_PROG) && in bpf_map_is_rdonly()
6435 READ_ONCE(map->frozen) && in bpf_map_is_rdonly()
6446 err = map->ops->map_direct_value_addr(map, &addr, off); in bpf_map_direct_read()
6465 return -EINVAL; in bpf_map_direct_read()
6490 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ in BTF_TYPE_SAFE_RCU()
6503 /* skb->sk, req->sk are not RCU protected, but we mark them as such
6533 /* no negative dentries in places where bpf can see them */ in BTF_TYPE_SAFE_TRUSTED()
6549 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); in type_is_rcu()
6560 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); in type_is_rcu_or_null()
6573 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); in type_is_trusted()
6582 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, in type_is_trusted_or_null()
6593 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); in check_ptr_to_btf_access()
6594 const char *tname = btf_name_by_offset(reg->btf, t->name_off); in check_ptr_to_btf_access()
6600 if (!env->allow_ptr_leaks) { in check_ptr_to_btf_access()
6604 return -EPERM; in check_ptr_to_btf_access()
6606 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
6608 "Cannot access kernel 'struct %s' from non-GPL compatible program\n", in check_ptr_to_btf_access()
6610 return -EINVAL; in check_ptr_to_btf_access()
6616 return -EACCES; in check_ptr_to_btf_access()
6618 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in check_ptr_to_btf_access()
6621 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_ptr_to_btf_access()
6625 return -EACCES; in check_ptr_to_btf_access()
6628 if (reg->type & MEM_USER) { in check_ptr_to_btf_access()
6632 return -EACCES; in check_ptr_to_btf_access()
6635 if (reg->type & MEM_PERCPU) { in check_ptr_to_btf_access()
6639 return -EACCES; in check_ptr_to_btf_access()
6642 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { in check_ptr_to_btf_access()
6643 if (!btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
6644 verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); in check_ptr_to_btf_access()
6645 return -EFAULT; in check_ptr_to_btf_access()
6647 ret = env->ops->btf_struct_access(&env->log, reg, off, size); in check_ptr_to_btf_access()
6653 if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { in check_ptr_to_btf_access()
6655 return -EACCES; in check_ptr_to_btf_access()
6658 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && in check_ptr_to_btf_access()
6659 !(reg->type & MEM_RCU) && !reg->ref_obj_id) { in check_ptr_to_btf_access()
6660 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); in check_ptr_to_btf_access()
6661 return -EFAULT; in check_ptr_to_btf_access()
6664 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); in check_ptr_to_btf_access()
6673 } else if (type_flag(reg->type) & PTR_UNTRUSTED) { in check_ptr_to_btf_access()
6684 * 'cgroups' pointer is untrusted if task->cgroups dereference in check_ptr_to_btf_access()
6686 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). in check_ptr_to_btf_access()
6689 * A regular RCU-protected pointer with __rcu tag can also be deemed in check_ptr_to_btf_access()
6696 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { in check_ptr_to_btf_access()
6710 /* keep as-is */ in check_ptr_to_btf_access()
6731 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); in check_ptr_to_btf_access()
6743 struct bpf_map *map = reg->map_ptr; in check_ptr_to_map_access()
6753 return -ENOTSUPP; in check_ptr_to_map_access()
6756 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { in check_ptr_to_map_access()
6758 map->map_type); in check_ptr_to_map_access()
6759 return -ENOTSUPP; in check_ptr_to_map_access()
6762 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); in check_ptr_to_map_access()
6763 tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_ptr_to_map_access()
6765 if (!env->allow_ptr_leaks) { in check_ptr_to_map_access()
6769 return -EPERM; in check_ptr_to_map_access()
6775 return -EACCES; in check_ptr_to_map_access()
6780 return -EACCES; in check_ptr_to_map_access()
6785 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); in check_ptr_to_map_access()
6786 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); in check_ptr_to_map_access()
6797 * maximum valid offset is -1.
6799 * The minimum valid offset is -MAX_BPF_STACK for writes, and
6800 * -state->allocated_stack for reads.
6809 if (t == BPF_WRITE || env->allow_uninit_stack) in check_stack_slot_within_bounds()
6810 min_valid_off = -MAX_BPF_STACK; in check_stack_slot_within_bounds()
6812 min_valid_off = -state->allocated_stack; in check_stack_slot_within_bounds()
6814 if (off < min_valid_off || off > -1) in check_stack_slot_within_bounds()
6815 return -EACCES; in check_stack_slot_within_bounds()
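/* For example (illustrative, with MAX_BPF_STACK == 512): a write at
 * off == -200 is within [-512, -1] and is accepted (the tracked stack is
 * grown afterwards), while a read at off == -200 when only 64 bytes have
 * been allocated is rejected unless allow_uninit_stack, because reads
 * may only reach down to -state->allocated_stack. Any access at
 * off >= 0 is rejected outright.
 */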
6822 * 'off' includes `regno->offset`, but not its dynamic part (if any).
6844 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
6845 min_off = (s64)reg->var_off.value + off; in check_stack_access_within_bounds()
6848 if (reg->smax_value >= BPF_MAX_VAR_OFF || in check_stack_access_within_bounds()
6849 reg->smin_value <= -BPF_MAX_VAR_OFF) { in check_stack_access_within_bounds()
6850 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", in check_stack_access_within_bounds()
6852 return -EACCES; in check_stack_access_within_bounds()
6854 min_off = reg->smin_value + off; in check_stack_access_within_bounds()
6855 max_off = reg->smax_value + off + access_size; in check_stack_access_within_bounds()
6860 err = -EINVAL; /* out of stack access into non-negative offsets */ in check_stack_access_within_bounds()
6865 err = -EFAULT; /* invalid negative access size; integer overflow? */ in check_stack_access_within_bounds()
6868 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
6874 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_within_bounds()
6875 verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n", in check_stack_access_within_bounds()
6882 * size is -min_off, not -min_off+1. in check_stack_access_within_bounds()
6884 return grow_stack_state(env, state, -min_off /* size */); in check_stack_access_within_bounds()
6890 if (prog->type == BPF_PROG_TYPE_LSM && in get_func_retval_range()
6891 prog->expected_attach_type == BPF_LSM_MAC && in get_func_retval_range()
6901 * if t==write && value_regno==-1, some unknown value is stored into memory
6902 * if t==read && value_regno==-1, don't care what we read from memory
6916 /* alignment checks will add in reg->off themselves */ in check_mem_access()
6921 /* for access checks, reg->off is just part of off */ in check_mem_access()
6922 off += reg->off; in check_mem_access()
6924 if (reg->type == PTR_TO_MAP_KEY) { in check_mem_access()
6927 return -EACCES; in check_mem_access()
6931 reg->map_ptr->key_size, false); in check_mem_access()
6936 } else if (reg->type == PTR_TO_MAP_VALUE) { in check_mem_access()
6942 return -EACCES; in check_mem_access()
6950 if (tnum_is_const(reg->var_off)) in check_mem_access()
6951 kptr_field = btf_record_find(reg->map_ptr->record, in check_mem_access()
6952 off + reg->var_off.value, BPF_KPTR); in check_mem_access()
6956 struct bpf_map *map = reg->map_ptr; in check_mem_access()
6958 /* if map is read-only, track its contents as scalars */ in check_mem_access()
6959 if (tnum_is_const(reg->var_off) && in check_mem_access()
6961 map->ops->map_direct_value_addr) { in check_mem_access()
6962 int map_off = off + reg->var_off.value; in check_mem_access()
6976 } else if (base_type(reg->type) == PTR_TO_MEM) { in check_mem_access()
6977 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
6979 if (type_may_be_null(reg->type)) { in check_mem_access()
6981 reg_type_str(env, reg->type)); in check_mem_access()
6982 return -EACCES; in check_mem_access()
6987 regno, reg_type_str(env, reg->type)); in check_mem_access()
6988 return -EACCES; in check_mem_access()
6994 return -EACCES; in check_mem_access()
6998 reg->mem_size, false); in check_mem_access()
7001 } else if (reg->type == PTR_TO_CTX) { in check_mem_access()
7011 return -EACCES; in check_mem_access()
7028 if (is_retval && get_func_retval_range(env->prog, &range)) { in check_mem_access()
7040 regs[value_regno].id = ++env->id_gen; in check_mem_access()
7044 * a sub-register. in check_mem_access()
7055 } else if (reg->type == PTR_TO_STACK) { in check_mem_access()
7070 return -EACCES; in check_mem_access()
7076 return -EACCES; in check_mem_access()
7081 } else if (reg->type == PTR_TO_FLOW_KEYS) { in check_mem_access()
7086 return -EACCES; in check_mem_access()
7092 } else if (type_is_sk_pointer(reg->type)) { in check_mem_access()
7095 regno, reg_type_str(env, reg->type)); in check_mem_access()
7096 return -EACCES; in check_mem_access()
7101 } else if (reg->type == PTR_TO_TP_BUFFER) { in check_mem_access()
7105 } else if (base_type(reg->type) == PTR_TO_BTF_ID && in check_mem_access()
7106 !type_may_be_null(reg->type)) { in check_mem_access()
7109 } else if (reg->type == CONST_PTR_TO_MAP) { in check_mem_access()
7112 } else if (base_type(reg->type) == PTR_TO_BUF) { in check_mem_access()
7113 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
7119 regno, reg_type_str(env, reg->type)); in check_mem_access()
7120 return -EACCES; in check_mem_access()
7122 max_access = &env->prog->aux->max_rdonly_access; in check_mem_access()
7124 max_access = &env->prog->aux->max_rdwr_access; in check_mem_access()
7132 } else if (reg->type == PTR_TO_ARENA) { in check_mem_access()
7137 reg_type_str(env, reg->type)); in check_mem_access()
7138 return -EACCES; in check_mem_access()
7144 /* b/h/w load zero-extends, mark upper bits as known 0 */ in check_mem_access()
7160 switch (insn->imm) { in check_atomic()
7173 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); in check_atomic()
7174 return -EINVAL; in check_atomic()
7177 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { in check_atomic()
7179 return -EINVAL; in check_atomic()
7183 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_atomic()
7188 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_atomic()
7192 if (insn->imm == BPF_CMPXCHG) { in check_atomic()
7202 return -EACCES; in check_atomic()
7206 if (is_pointer_value(env, insn->src_reg)) { in check_atomic()
7207 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_atomic()
7208 return -EACCES; in check_atomic()
7211 if (is_ctx_reg(env, insn->dst_reg) || in check_atomic()
7212 is_pkt_reg(env, insn->dst_reg) || in check_atomic()
7213 is_flow_key_reg(env, insn->dst_reg) || in check_atomic()
7214 is_sk_reg(env, insn->dst_reg) || in check_atomic()
7215 (is_arena_reg(env, insn->dst_reg) && !bpf_jit_supports_insn(insn, true))) { in check_atomic()
7217 insn->dst_reg, in check_atomic()
7218 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in check_atomic()
7219 return -EACCES; in check_atomic()
7222 if (insn->imm & BPF_FETCH) { in check_atomic()
7223 if (insn->imm == BPF_CMPXCHG) in check_atomic()
7226 load_reg = insn->src_reg; in check_atomic()
7236 load_reg = -1; in check_atomic()
7242 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
7243 BPF_SIZE(insn->code), BPF_READ, -1, true, false); in check_atomic()
7245 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
7246 BPF_SIZE(insn->code), BPF_READ, load_reg, in check_atomic()
7251 if (is_arena_reg(env, insn->dst_reg)) { in check_atomic()
7257 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
7258 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); in check_atomic()
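/* For example (illustrative): an atomic fetch-add, i.e. BPF_ATOMIC with
 * BPF_DW size and insn->imm == (BPF_ADD | BPF_FETCH), returns the old
 * 64-bit value at dst_reg + off in src_reg. load_reg is therefore
 * insn->src_reg and is checked as DST_OP; the memory operand is checked
 * twice as a read (once with value_regno == -1, once filling load_reg)
 * and then once as a write.
 */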
7269 * 'off' includes 'regno->off', but not its dynamic part (if any).
7285 * read-only. in check_stack_range_initialized()
7290 verbose(env, "invalid zero-sized read\n"); in check_stack_range_initialized()
7291 return -EACCES; in check_stack_range_initialized()
7310 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
7311 min_off = max_off = reg->var_off.value + off; in check_stack_range_initialized()
7318 if (!env->bypass_spec_v1) { in check_stack_range_initialized()
7321 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
7324 return -EACCES; in check_stack_range_initialized()
7332 if (meta && meta->raw_mode) in check_stack_range_initialized()
7335 min_off = reg->smin_value + off; in check_stack_range_initialized()
7336 max_off = reg->smax_value + off; in check_stack_range_initialized()
7339 if (meta && meta->raw_mode) { in check_stack_range_initialized()
7354 int stack_off = -i - 1; in check_stack_range_initialized()
7358 if (state->allocated_stack <= stack_off) in check_stack_range_initialized()
7360 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
7362 return -EACCES; in check_stack_range_initialized()
7365 meta->access_size = access_size; in check_stack_range_initialized()
7366 meta->regno = regno; in check_stack_range_initialized()
7373 slot = -i - 1; in check_stack_range_initialized()
7375 if (state->allocated_stack <= slot) { in check_stack_range_initialized()
7377 return -EFAULT; in check_stack_range_initialized()
7380 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
7384 (*stype == STACK_INVALID && env->allow_uninit_stack)) { in check_stack_range_initialized()
7392 if (is_spilled_reg(&state->stack[spi]) && in check_stack_range_initialized()
7393 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || in check_stack_range_initialized()
7394 env->allow_ptr_leaks)) { in check_stack_range_initialized()
7396 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_range_initialized()
7398 scrub_spilled_slot(&state->stack[spi].slot_type[j]); in check_stack_range_initialized()
7403 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
7405 err_extra, regno, min_off, i - min_off, access_size); in check_stack_range_initialized()
7409 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
7411 err_extra, regno, tn_buf, i - min_off, access_size); in check_stack_range_initialized()
7413 return -EACCES; in check_stack_range_initialized()
7415 /* reading any byte out of 8-byte 'spill_slot' will cause in check_stack_range_initialized()
7418 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_range_initialized()
7419 state->stack[spi].spilled_ptr.parent, in check_stack_range_initialized()
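/* Illustrative sketch, not part of verifier.c: what this stack-range check
 * means for a BPF C program (buf, src and rb are made-up names):
 *
 *   char buf[16] = {};
 *   bpf_probe_read_kernel(buf, sizeof(buf), src); // dst arg is raw_mode: the
 *                                                 // slots may be uninitialized
 *   bpf_ringbuf_output(&rb, buf, sizeof(buf), 0); // src arg is read: all 16
 *                                                 // bytes must be initialized
 */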
7438 switch (base_type(reg->type)) { in check_helper_mem_access()
7441 return check_packet_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7446 reg_type_str(env, reg->type)); in check_helper_mem_access()
7447 return -EACCES; in check_helper_mem_access()
7449 return check_mem_region_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7450 reg->map_ptr->key_size, false); in check_helper_mem_access()
7452 if (check_map_access_type(env, regno, reg->off, access_size, access_type)) in check_helper_mem_access()
7453 return -EACCES; in check_helper_mem_access()
7454 return check_map_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7457 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
7460 reg_type_str(env, reg->type)); in check_helper_mem_access()
7461 return -EACCES; in check_helper_mem_access()
7464 return check_mem_region_access(env, regno, reg->off, in check_helper_mem_access()
7465 access_size, reg->mem_size, in check_helper_mem_access()
7468 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
7471 reg_type_str(env, reg->type)); in check_helper_mem_access()
7472 return -EACCES; in check_helper_mem_access()
7475 max_access = &env->prog->aux->max_rdonly_access; in check_helper_mem_access()
7477 max_access = &env->prog->aux->max_rdwr_access; in check_helper_mem_access()
7479 return check_buffer_access(env, reg, regno, reg->off, in check_helper_mem_access()
7485 regno, reg->off, access_size, in check_helper_mem_access()
7488 return check_ptr_to_btf_access(env, regs, regno, reg->off, in check_helper_mem_access()
7489 access_size, BPF_READ, -1); in check_helper_mem_access()
7496 if (!env->ops->convert_ctx_access) { in check_helper_mem_access()
7497 int offset = access_size - 1; in check_helper_mem_access()
7499 /* Allow zero-byte read from PTR_TO_CTX */ in check_helper_mem_access()
7501 return zero_size_allowed ? 0 : -EACCES; in check_helper_mem_access()
7503 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, in check_helper_mem_access()
7504 access_type, -1, false, false); in check_helper_mem_access()
7509 /* Allow zero-byte read from NULL, regardless of pointer type */ in check_helper_mem_access()
7515 reg_type_str(env, reg->type)); in check_helper_mem_access()
7517 return -EACCES; in check_helper_mem_access()
7524 * @regno is the register containing the access size. regno-1 is the register
7543 meta->msize_max_value = reg->umax_value; in check_mem_size_reg()
7550 if (!tnum_is_const(reg->var_off)) in check_mem_size_reg()
7553 if (reg->smin_value < 0) { in check_mem_size_reg()
7556 return -EACCES; in check_mem_size_reg()
7559 if (reg->umin_value == 0 && !zero_size_allowed) { in check_mem_size_reg()
7560 verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n", in check_mem_size_reg()
7561 regno, reg->umin_value, reg->umax_value); in check_mem_size_reg()
7562 return -EACCES; in check_mem_size_reg()
7565 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { in check_mem_size_reg()
7568 return -EACCES; in check_mem_size_reg()
7570 err = check_helper_mem_access(env, regno - 1, reg->umax_value, in check_mem_size_reg()
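/* Illustrative sketch, not part of verifier.c: the size register feeding a
 * helper must be provably bounded (len, buf, src, get_len are made-up names):
 *
 *   __u32 len = get_len();                 // made-up source of an unknown scalar
 *   if (len == 0 || len > sizeof(buf))
 *           return 0;                      // now umin >= 1, umax <= sizeof(buf)
 *   bpf_probe_read_kernel(buf, len, src);  // accepted; without the bounds check
 *                                          // above the call is rejected
 */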
7580 bool may_be_null = type_may_be_null(reg->type); in check_mem_reg()
7589 * the conversion shouldn't be visible to a caller. in check_mem_reg()
7608 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; in check_kfunc_mem_size_reg()
7609 bool may_be_null = type_may_be_null(mem_reg->type); in check_kfunc_mem_size_reg()
7635 * Two bpf_map_lookups (even with the same key) will have different reg->id.
7636 * Two separate bpf_obj_new will also have different reg->id.
7638 * clears reg->id after value_or_null->value transition, since the verifier only
7642 * reg->id > 0 after value_or_null->value transition. By doing so
7647 * deadlocks. in process_spin_lock()

7651 * cur_state->active_lock remembers which map value element or allocated
7658 struct bpf_verifier_state *cur = env->cur_state; in process_spin_lock()
7659 bool is_const = tnum_is_const(reg->var_off); in process_spin_lock()
7660 u64 val = reg->var_off.value; in process_spin_lock()
7669 return -EINVAL; in process_spin_lock()
7671 if (reg->type == PTR_TO_MAP_VALUE) { in process_spin_lock()
7672 map = reg->map_ptr; in process_spin_lock()
7673 if (!map->btf) { in process_spin_lock()
7676 map->name); in process_spin_lock()
7677 return -EINVAL; in process_spin_lock()
7680 btf = reg->btf; in process_spin_lock()
7686 map ? map->name : "kptr"); in process_spin_lock()
7687 return -EINVAL; in process_spin_lock()
7689 if (rec->spin_lock_off != val + reg->off) { in process_spin_lock()
7691 val + reg->off, rec->spin_lock_off); in process_spin_lock()
7692 return -EINVAL; in process_spin_lock()
7695 if (cur->active_lock.ptr) { in process_spin_lock()
7698 return -EINVAL; in process_spin_lock()
7701 cur->active_lock.ptr = map; in process_spin_lock()
7703 cur->active_lock.ptr = btf; in process_spin_lock()
7704 cur->active_lock.id = reg->id; in process_spin_lock()
7713 if (!cur->active_lock.ptr) { in process_spin_lock()
7715 return -EINVAL; in process_spin_lock()
7717 if (cur->active_lock.ptr != ptr || in process_spin_lock()
7718 cur->active_lock.id != reg->id) { in process_spin_lock()
7720 return -EINVAL; in process_spin_lock()
7725 cur->active_lock.ptr = NULL; in process_spin_lock()
7726 cur->active_lock.id = 0; in process_spin_lock()
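/* Illustrative sketch, not part of verifier.c: the usage pattern enforced here
 * (elem, m, key and v are made-up names):
 *
 *   struct elem { struct bpf_spin_lock lock; int cnt; };   // in the map value
 *
 *   v = bpf_map_lookup_elem(&m, &key);
 *   if (!v)
 *           return 0;
 *   bpf_spin_lock(&v->lock);     // active_lock.ptr/id now track this element
 *   v->cnt++;
 *   bpf_spin_unlock(&v->lock);   // must be the same lock, and must happen
 *                                // before the program returns
 */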
7735 bool is_const = tnum_is_const(reg->var_off); in process_timer_func()
7736 struct bpf_map *map = reg->map_ptr; in process_timer_func()
7737 u64 val = reg->var_off.value; in process_timer_func()
7743 return -EINVAL; in process_timer_func()
7745 if (!map->btf) { in process_timer_func()
7747 map->name); in process_timer_func()
7748 return -EINVAL; in process_timer_func()
7750 if (!btf_record_has_field(map->record, BPF_TIMER)) { in process_timer_func()
7751 verbose(env, "map '%s' has no valid bpf_timer\n", map->name); in process_timer_func()
7752 return -EINVAL; in process_timer_func()
7754 if (map->record->timer_off != val + reg->off) { in process_timer_func()
7756 val + reg->off, map->record->timer_off); in process_timer_func()
7757 return -EINVAL; in process_timer_func()
7759 if (meta->map_ptr) { in process_timer_func()
7761 return -EFAULT; in process_timer_func()
7763 meta->map_uid = reg->map_uid; in process_timer_func()
7764 meta->map_ptr = map; in process_timer_func()
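/* Illustrative sketch, not part of verifier.c: the helper must be handed a
 * pointer to the bpf_timer field itself (elem, m and v are made-up names):
 *
 *   struct elem { __u64 cnt; struct bpf_timer t; };   // t's offset is what
 *                                                     // map->record->timer_off records
 *   bpf_timer_init(&v->t, &m, CLOCK_MONOTONIC);       // &v->t must match that
 *                                                     // offset exactly
 */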
7772 struct bpf_map *map = reg->map_ptr; in process_wq_func()
7773 u64 val = reg->var_off.value; in process_wq_func()
7775 if (map->record->wq_off != val + reg->off) { in process_wq_func()
7777 val + reg->off, map->record->wq_off); in process_wq_func()
7778 return -EINVAL; in process_wq_func()
7780 meta->map.uid = reg->map_uid; in process_wq_func()
7781 meta->map.ptr = map; in process_wq_func()
7794 if (type_is_ptr_alloc_obj(reg->type)) { in process_kptr_func()
7797 map_ptr = reg->map_ptr; in process_kptr_func()
7798 if (!map_ptr->btf) { in process_kptr_func()
7800 map_ptr->name); in process_kptr_func()
7801 return -EINVAL; in process_kptr_func()
7803 rec = map_ptr->record; in process_kptr_func()
7804 meta->map_ptr = map_ptr; in process_kptr_func()
7807 if (!tnum_is_const(reg->var_off)) { in process_kptr_func()
7811 return -EINVAL; in process_kptr_func()
7816 return -EINVAL; in process_kptr_func()
7819 kptr_off = reg->off + reg->var_off.value; in process_kptr_func()
7823 return -EACCES; in process_kptr_func()
7825 if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) { in process_kptr_func()
7827 return -EACCES; in process_kptr_func()
7829 meta->kptr_field = kptr_field; in process_kptr_func()
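/* Illustrative sketch, not part of verifier.c: kptr fields are exchanged with
 * bpf_kptr_xchg() at their exact offset (elem, v, old and new are made-up names):
 *
 *   struct elem { struct task_struct __kptr *task; };
 *
 *   old = bpf_kptr_xchg(&v->task, new);   // &v->task must land on the
 *                                         // BPF_KPTR_REF/BPF_KPTR_PERCPU field
 *   if (old)
 *           bpf_task_release(old);        // the returned pointer is owned
 */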
7848 * reg->type and the memory's in reg->dynptr.type), but there is no support for
7864 if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { in process_dynptr_func()
7868 return -EINVAL; in process_dynptr_func()
7876 return -EFAULT; in process_dynptr_func()
7879 /* MEM_UNINIT - Points to memory that is an appropriate candidate for in process_dynptr_func()
7886 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be in process_dynptr_func()
7890 * None - Points to an initialized dynptr that can be mutated and in process_dynptr_func()
7899 return -EINVAL; in process_dynptr_func()
7905 i, BPF_DW, BPF_WRITE, -1, false, false); in process_dynptr_func()
7912 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ in process_dynptr_func()
7913 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { in process_dynptr_func()
7915 return -EINVAL; in process_dynptr_func()
7922 return -EINVAL; in process_dynptr_func()
7930 return -EINVAL; in process_dynptr_func()
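/* Illustrative sketch, not part of verifier.c: a stack dynptr starts out
 * uninitialized (the MEM_UNINIT case) and is constructed by a helper before
 * use (rb and val are made-up names):
 *
 *   struct bpf_dynptr ptr;
 *
 *   bpf_ringbuf_reserve_dynptr(&rb, 16, 0, &ptr);      // constructs ptr
 *   bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0);   // mutable use
 *   bpf_ringbuf_submit_dynptr(&ptr, 0);
 */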
7942 return state->stack[spi].spilled_ptr.ref_obj_id; in iter_ref_obj_id()
7947 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); in is_iter_kfunc()
7952 return meta->kfunc_flags & KF_ITER_NEW; in is_iter_new_kfunc()
7957 return meta->kfunc_flags & KF_ITER_NEXT; in is_iter_next_kfunc()
7962 return meta->kfunc_flags & KF_ITER_DESTROY; in is_iter_destroy_kfunc()
7975 return btf_param_match_suffix(meta->btf, arg, "__iter"); in is_kfunc_arg_iter()
7991 btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1); in process_iter_arg()
7994 return -EINVAL; in process_iter_arg()
7996 t = btf_type_by_id(meta->btf, btf_id); in process_iter_arg()
7997 nr_slots = t->size / BPF_REG_SIZE; in process_iter_arg()
8003 iter_type_str(meta->btf, btf_id), regno); in process_iter_arg()
8004 return -EINVAL; in process_iter_arg()
8009 i, BPF_DW, BPF_WRITE, -1, false, false); in process_iter_arg()
8014 err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); in process_iter_arg()
8021 err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); in process_iter_arg()
8025 case -EINVAL: in process_iter_arg()
8027 iter_type_str(meta->btf, btf_id), regno); in process_iter_arg()
8029 case -EPROTO: in process_iter_arg()
8030 verbose(env, "expected an RCU CS when using %s\n", meta->func_name); in process_iter_arg()
8044 /* remember meta->iter info for process_iter_next_call() */ in process_iter_arg()
8045 meta->iter.spi = spi; in process_iter_arg()
8046 meta->iter.frameno = reg->frameno; in process_iter_arg()
8047 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); in process_iter_arg()
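/* Illustrative sketch, not part of verifier.c: the open-coded iterator
 * contract that process_iter_arg() enforces, using the numbers iterator:
 *
 *   struct bpf_iter_num it;
 *   int *v, sum = 0;
 *
 *   bpf_iter_num_new(&it, 0, 10);          // KF_ITER_NEW: slots must be uninit
 *   while ((v = bpf_iter_num_next(&it)))   // KF_ITER_NEXT: slots must be init
 *           sum += *v;
 *   bpf_iter_num_destroy(&it);             // KF_ITER_DESTROY releases the slots
 */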
8060 * stopped at insn_idx with callsites matching those in cur->frame.
8071 for (; sl; sl = sl->next) { in find_prev_entry()
8072 /* If st->branches != 0 state is a part of current DFS verification path, in find_prev_entry()
8075 st = &sl->state; in find_prev_entry()
8076 if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && in find_prev_entry()
8077 st->dfs_depth < cur->dfs_depth) in find_prev_entry()
8093 if (rold->type != SCALAR_VALUE) in maybe_widen_reg()
8095 if (rold->type != rcur->type) in maybe_widen_reg()
8097 if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) in maybe_widen_reg()
8110 for (fr = old->curframe; fr >= 0; fr--) { in widen_imprecise_scalars()
8111 fold = old->frame[fr]; in widen_imprecise_scalars()
8112 fcur = cur->frame[fr]; in widen_imprecise_scalars()
8116 &fold->regs[i], in widen_imprecise_scalars()
8117 &fcur->regs[i], in widen_imprecise_scalars()
8118 &env->idmap_scratch); in widen_imprecise_scalars()
8120 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { in widen_imprecise_scalars()
8121 if (!is_spilled_reg(&fold->stack[i]) || in widen_imprecise_scalars()
8122 !is_spilled_reg(&fcur->stack[i])) in widen_imprecise_scalars()
8126 &fold->stack[i].spilled_ptr, in widen_imprecise_scalars()
8127 &fcur->stack[i].spilled_ptr, in widen_imprecise_scalars()
8128 &env->idmap_scratch); in widen_imprecise_scalars()
8137 int iter_frameno = meta->iter.frameno; in get_iter_from_state()
8138 int iter_spi = meta->iter.spi; in get_iter_from_state()
8140 return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; in get_iter_from_state()
8163 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
8224 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in process_iter_next_call()
8225 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; in process_iter_next_call()
8232 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && in process_iter_next_call()
8233 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { in process_iter_next_call()
8235 cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); in process_iter_next_call()
8236 return -EFAULT; in process_iter_next_call()
8239 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { in process_iter_next_call()
8243 if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || in process_iter_next_call()
8244 !same_callsites(cur_st->parent, cur_st)) { in process_iter_next_call()
8246 return -EFAULT; in process_iter_next_call()
8248 /* Note cur_st->parent in the call below, it is necessary to skip in process_iter_next_call()
8252 prev_st = find_prev_entry(env, cur_st->parent, insn_idx); in process_iter_next_call()
8256 return -ENOMEM; in process_iter_next_call()
8259 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; in process_iter_next_call()
8260 queued_iter->iter.depth++; in process_iter_next_call()
8264 queued_fr = queued_st->frame[queued_st->curframe]; in process_iter_next_call()
8265 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); in process_iter_next_call()
8270 cur_iter->iter.state = BPF_ITER_STATE_DRAINED; in process_iter_next_call()
8271 __mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]); in process_iter_next_call()
8302 if (!meta->map_ptr) { in resolve_map_arg_type()
8304 verbose(env, "invalid map_ptr to access map->type\n"); in resolve_map_arg_type()
8305 return -EACCES; in resolve_map_arg_type()
8308 switch (meta->map_ptr->map_type) { in resolve_map_arg_type()
8315 return -EINVAL; in resolve_map_arg_type()
8319 if (meta->func_id == BPF_FUNC_map_peek_elem) in resolve_map_arg_type()
8445 enum bpf_reg_type expected, type = reg->type; in check_reg_type()
8452 return -EFAULT; in check_reg_type()
8475 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) { in check_reg_type()
8480 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { in check_reg_type()
8481 expected = compatible->types[i]; in check_reg_type()
8489 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); in check_reg_type()
8491 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); in check_reg_type()
8492 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); in check_reg_type()
8493 return -EACCES; in check_reg_type()
8496 if (base_type(reg->type) != PTR_TO_BTF_ID) in check_reg_type()
8503 func_id_name(meta->func_id), in check_reg_type()
8504 regno, reg_type_str(env, reg->type)); in check_reg_type()
8505 return -EACCES; in check_reg_type()
8510 switch ((int)reg->type) { in check_reg_type()
8523 meta->func_id != BPF_FUNC_sk_release; in check_reg_type()
8525 if (type_may_be_null(reg->type) && in check_reg_type()
8528 return -EACCES; in check_reg_type()
8532 if (!compatible->btf_id) { in check_reg_type()
8534 return -EFAULT; in check_reg_type()
8536 arg_btf_id = compatible->btf_id; in check_reg_type()
8539 if (meta->func_id == BPF_FUNC_kptr_xchg) { in check_reg_type()
8540 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8541 return -EACCES; in check_reg_type()
8545 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", in check_reg_type()
8547 return -EACCES; in check_reg_type()
8550 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in check_reg_type()
8554 regno, btf_type_name(reg->btf, reg->btf_id), in check_reg_type()
8556 return -EACCES; in check_reg_type()
8563 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock && in check_reg_type()
8564 meta->func_id != BPF_FUNC_kptr_xchg) { in check_reg_type()
8566 return -EFAULT; in check_reg_type()
8569 if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) { in check_reg_type()
8570 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8571 return -EACCES; in check_reg_type()
8581 return -EFAULT; in check_reg_type()
8607 u32 type = reg->type; in check_func_arg_reg_off()
8613 * meta->release_regno. in check_func_arg_reg_off()
8629 if (reg->off) { in check_func_arg_reg_off()
8632 return -EINVAL; in check_func_arg_reg_off()
8663 * can be non-zero. This was already checked above. So pass in check_func_arg_reg_off()
8682 if (arg_type_is_dynptr(fn->arg_type[i])) { in get_dynptr_arg_reg()
8701 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_id()
8702 return reg->id; in dynptr_id()
8706 return state->stack[spi].spilled_ptr.id; in dynptr_id()
8714 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_ref_obj_id()
8715 return reg->ref_obj_id; in dynptr_ref_obj_id()
8719 return state->stack[spi].spilled_ptr.ref_obj_id; in dynptr_ref_obj_id()
8728 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_get_type()
8729 return reg->dynptr.type; in dynptr_get_type()
8731 spi = __get_spi(reg->off); in dynptr_get_type()
8737 return state->stack[spi].spilled_ptr.dynptr.type; in dynptr_get_type()
8743 struct bpf_map *map = reg->map_ptr; in check_reg_const_str()
8749 if (reg->type != PTR_TO_MAP_VALUE) in check_reg_const_str()
8750 return -EINVAL; in check_reg_const_str()
8754 return -EACCES; in check_reg_const_str()
8757 if (!tnum_is_const(reg->var_off)) { in check_reg_const_str()
8759 return -EACCES; in check_reg_const_str()
8762 if (!map->ops->map_direct_value_addr) { in check_reg_const_str()
8764 return -EACCES; in check_reg_const_str()
8767 err = check_map_access(env, regno, reg->off, in check_reg_const_str()
8768 map->value_size - reg->off, false, in check_reg_const_str()
8773 map_off = reg->off + reg->var_off.value; in check_reg_const_str()
8774 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); in check_reg_const_str()
8781 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { in check_reg_const_str()
8782 verbose(env, "string is not zero-terminated\n"); in check_reg_const_str()
8783 return -EINVAL; in check_reg_const_str()
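/* Illustrative sketch, not part of verifier.c: a constant string argument must
 * live in a read-only map with direct value access, which is what a .rodata
 * global provides (out and args are made-up names):
 *
 *   static const char fmt[] = "pid=%d comm=%s\n";   // placed in .rodata
 *
 *   bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 */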
8795 enum bpf_arg_type arg_type = fn->arg_type[arg]; in check_func_arg()
8796 enum bpf_reg_type type = reg->type; in check_func_arg()
8811 return -EACCES; in check_func_arg()
8819 return -EACCES; in check_func_arg()
8837 arg_btf_id = fn->arg_btf_id[arg]; in check_func_arg()
8858 if (reg->type == PTR_TO_STACK) { in check_func_arg()
8860 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { in check_func_arg()
8862 return -EINVAL; in check_func_arg()
8866 return -EINVAL; in check_func_arg()
8868 } else if (!reg->ref_obj_id && !register_is_null(reg)) { in check_func_arg()
8871 return -EINVAL; in check_func_arg()
8873 if (meta->release_regno) { in check_func_arg()
8875 return -EFAULT; in check_func_arg()
8877 meta->release_regno = regno; in check_func_arg()
8880 if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) { in check_func_arg()
8881 if (meta->ref_obj_id) { in check_func_arg()
8883 regno, reg->ref_obj_id, in check_func_arg()
8884 meta->ref_obj_id); in check_func_arg()
8885 return -EFAULT; in check_func_arg()
8887 meta->ref_obj_id = reg->ref_obj_id; in check_func_arg()
8893 if (meta->map_ptr) { in check_func_arg()
8906 if (meta->map_ptr != reg->map_ptr || in check_func_arg()
8907 meta->map_uid != reg->map_uid) { in check_func_arg()
8910 meta->map_uid, reg->map_uid); in check_func_arg()
8911 return -EINVAL; in check_func_arg()
8914 meta->map_ptr = reg->map_ptr; in check_func_arg()
8915 meta->map_uid = reg->map_uid; in check_func_arg()
8919 * check that [key, key + map->key_size) are within in check_func_arg()
8922 if (!meta->map_ptr) { in check_func_arg()
8928 verbose(env, "invalid map_ptr to access map->key\n"); in check_func_arg()
8929 return -EACCES; in check_func_arg()
8931 err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, in check_func_arg()
8939 * check [value, value + map->value_size) validity in check_func_arg()
8941 if (!meta->map_ptr) { in check_func_arg()
8943 verbose(env, "invalid map_ptr to access map->value\n"); in check_func_arg()
8944 return -EACCES; in check_func_arg()
8946 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
8947 err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, in check_func_arg()
8952 if (!reg->btf_id) { in check_func_arg()
8954 return -EACCES; in check_func_arg()
8956 meta->ret_btf = reg->btf; in check_func_arg()
8957 meta->ret_btf_id = reg->btf_id; in check_func_arg()
8962 return -EACCES; in check_func_arg()
8964 if (meta->func_id == BPF_FUNC_spin_lock) { in check_func_arg()
8968 } else if (meta->func_id == BPF_FUNC_spin_unlock) { in check_func_arg()
8974 return -EFAULT; in check_func_arg()
8983 meta->subprogno = reg->subprogno; in check_func_arg()
8989 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
8991 err = check_helper_mem_access(env, regno, fn->arg_size[arg], in check_func_arg()
8997 err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); in check_func_arg()
9002 fn->arg_type[arg - 1] & MEM_WRITE ? in check_func_arg()
9008 fn->arg_type[arg - 1] & MEM_WRITE ? in check_func_arg()
9018 if (!tnum_is_const(reg->var_off)) { in check_func_arg()
9021 return -EACCES; in check_func_arg()
9023 meta->mem_size = reg->var_off.value; in check_func_arg()
9047 enum bpf_attach_type eatype = env->prog->expected_attach_type; in may_update_sockmap()
9048 enum bpf_prog_type type = resolve_prog_type(env->prog); in may_update_sockmap()
9085 return env->prog->jit_requested && in allow_tail_call_in_subprogs()
9096 switch (map->map_type) { in check_map_func_compatibility()
9142 /* Restrict bpf side of cpumap and xskmap, open when use-cases in check_map_func_compatibility()
9224 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) in check_map_func_compatibility()
9226 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { in check_map_func_compatibility()
9227 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in check_map_func_compatibility()
9228 return -EINVAL; in check_map_func_compatibility()
9236 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) in check_map_func_compatibility()
9245 if (map->map_type != BPF_MAP_TYPE_RINGBUF) in check_map_func_compatibility()
9249 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) in check_map_func_compatibility()
9253 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) in check_map_func_compatibility()
9258 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) in check_map_func_compatibility()
9262 if (map->map_type != BPF_MAP_TYPE_DEVMAP && in check_map_func_compatibility()
9263 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && in check_map_func_compatibility()
9264 map->map_type != BPF_MAP_TYPE_CPUMAP && in check_map_func_compatibility()
9265 map->map_type != BPF_MAP_TYPE_XSKMAP) in check_map_func_compatibility()
9271 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) in check_map_func_compatibility()
9277 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
9281 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in check_map_func_compatibility()
9282 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in check_map_func_compatibility()
9286 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && in check_map_func_compatibility()
9287 map->map_type != BPF_MAP_TYPE_SOCKMAP && in check_map_func_compatibility()
9288 map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
9292 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
9293 map->map_type != BPF_MAP_TYPE_STACK) in check_map_func_compatibility()
9298 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
9299 map->map_type != BPF_MAP_TYPE_STACK && in check_map_func_compatibility()
9300 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) in check_map_func_compatibility()
9304 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in check_map_func_compatibility()
9305 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in check_map_func_compatibility()
9306 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) in check_map_func_compatibility()
9311 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) in check_map_func_compatibility()
9316 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) in check_map_func_compatibility()
9321 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) in check_map_func_compatibility()
9326 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) in check_map_func_compatibility()
9336 map->map_type, func_id_name(func_id), func_id); in check_map_func_compatibility()
9337 return -EINVAL; in check_map_func_compatibility()
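/* Illustrative sketch, not part of verifier.c: helper/map pairing, e.g.
 * bpf_tail_call() only accepts a BPF_MAP_TYPE_PROG_ARRAY (jmp_table is a
 * made-up name, written in libbpf BTF map definition syntax):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *           __uint(max_entries, 4);
 *           __uint(key_size, sizeof(__u32));
 *           __uint(value_size, sizeof(__u32));
 *   } jmp_table SEC(".maps");
 *
 *   bpf_tail_call(ctx, &jmp_table, 1);   // any other map type is rejected here
 */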
9344 if (arg_type_is_raw_mem(fn->arg1_type)) in check_raw_mode_ok()
9346 if (arg_type_is_raw_mem(fn->arg2_type)) in check_raw_mode_ok()
9348 if (arg_type_is_raw_mem(fn->arg3_type)) in check_raw_mode_ok()
9350 if (arg_type_is_raw_mem(fn->arg4_type)) in check_raw_mode_ok()
9352 if (arg_type_is_raw_mem(fn->arg5_type)) in check_raw_mode_ok()
9364 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; in check_args_pair_invalid()
9365 bool has_size = fn->arg_size[arg] != 0; in check_args_pair_invalid()
9368 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) in check_args_pair_invalid()
9369 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); in check_args_pair_invalid()
9371 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) in check_args_pair_invalid()
9384 if (arg_type_is_mem_size(fn->arg1_type) || in check_arg_pair_ok()
9399 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { in check_btf_id_ok()
9400 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) in check_btf_id_ok()
9401 return !!fn->arg_btf_id[i]; in check_btf_id_ok()
9402 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) in check_btf_id_ok()
9403 return fn->arg_btf_id[i] == BPF_PTR_POISON; in check_btf_id_ok()
9404 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && in check_btf_id_ok()
9406 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || in check_btf_id_ok()
9407 !(fn->arg_type[i] & MEM_FIXED_SIZE))) in check_btf_id_ok()
9418 check_btf_id_ok(fn) ? 0 : -EINVAL; in check_func_proto()
9432 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in clear_all_pkt_pointers()
9439 AT_PKT_END = -1,
9440 BEYOND_PKT_END = -2,
9445 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_pkt_end()
9446 struct bpf_reg_state *reg = &state->regs[regn]; in mark_pkt_end()
9448 if (reg->type != PTR_TO_PACKET) in mark_pkt_end()
9459 reg->range = BEYOND_PKT_END; in mark_pkt_end()
9461 reg->range = AT_PKT_END; in mark_pkt_end()
9478 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in release_reference()
9479 if (reg->ref_obj_id == ref_obj_id) in release_reference()
9491 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in invalidate_non_owning_refs()
9492 if (type_is_non_owning_ref(reg->type)) in invalidate_non_owning_refs()
9502 /* after the call registers r0 - r5 were scratched */ in clear_caller_saved_regs()
9525 if (state->curframe + 1 >= MAX_CALL_FRAMES) { in setup_func_entry()
9527 state->curframe + 2); in setup_func_entry()
9528 return -E2BIG; in setup_func_entry()
9531 if (state->frame[state->curframe + 1]) { in setup_func_entry()
9533 state->curframe + 1); in setup_func_entry()
9534 return -EFAULT; in setup_func_entry()
9537 caller = state->frame[state->curframe]; in setup_func_entry()
9540 return -ENOMEM; in setup_func_entry()
9541 state->frame[state->curframe + 1] = callee; in setup_func_entry()
9543 /* callee cannot access r0, r6 - r9 for reading and has to write in setup_func_entry()
9550 state->curframe + 1 /* frameno within this callchain */, in setup_func_entry()
9559 state->curframe++; in setup_func_entry()
9565 state->frame[state->curframe + 1] = NULL; in setup_func_entry()
9574 struct bpf_verifier_log *log = &env->log; in btf_check_func_arg_match()
9585 for (i = 0; i < sub->arg_cnt; i++) { in btf_check_func_arg_match()
9588 struct bpf_subprog_arg_info *arg = &sub->args[i]; in btf_check_func_arg_match()
9590 if (arg->arg_type == ARG_ANYTHING) { in btf_check_func_arg_match()
9591 if (reg->type != SCALAR_VALUE) { in btf_check_func_arg_match()
9593 return -EINVAL; in btf_check_func_arg_match()
9595 } else if (arg->arg_type == ARG_PTR_TO_CTX) { in btf_check_func_arg_match()
9602 if (reg->type != PTR_TO_CTX) { in btf_check_func_arg_match()
9604 return -EINVAL; in btf_check_func_arg_match()
9606 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in btf_check_func_arg_match()
9610 if (check_mem_reg(env, reg, regno, arg->mem_size)) in btf_check_func_arg_match()
9611 return -EINVAL; in btf_check_func_arg_match()
9612 if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) { in btf_check_func_arg_match()
9613 bpf_log(log, "arg#%d is expected to be non-NULL\n", i); in btf_check_func_arg_match()
9614 return -EINVAL; in btf_check_func_arg_match()
9616 } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { in btf_check_func_arg_match()
9622 * run-time debug nightmare. in btf_check_func_arg_match()
9624 if (reg->type != PTR_TO_ARENA && reg->type != SCALAR_VALUE) { in btf_check_func_arg_match()
9626 return -EINVAL; in btf_check_func_arg_match()
9628 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in btf_check_func_arg_match()
9633 ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); in btf_check_func_arg_match()
9636 } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { in btf_check_func_arg_match()
9640 if (register_is_null(reg) && type_may_be_null(arg->arg_type)) in btf_check_func_arg_match()
9644 err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta); in btf_check_func_arg_match()
9645 err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type); in btf_check_func_arg_match()
9650 i, arg->arg_type); in btf_check_func_arg_match()
9651 return -EFAULT; in btf_check_func_arg_match()
9660 * EFAULT - there is a verifier bug. Abort verification.
9661 * EINVAL - there is a type mismatch or BTF is not available.
9662 * 0 - BTF matches with what bpf_reg_state expects.
9668 struct bpf_prog *prog = env->prog; in btf_check_subprog_call()
9669 struct btf *btf = prog->aux->btf; in btf_check_subprog_call()
9673 if (!prog->aux->func_info) in btf_check_subprog_call()
9674 return -EINVAL; in btf_check_subprog_call()
9676 btf_id = prog->aux->func_info[subprog].type_id; in btf_check_subprog_call()
9678 return -EFAULT; in btf_check_subprog_call()
9680 if (prog->aux->func_info_aux[subprog].unreliable) in btf_check_subprog_call()
9681 return -EINVAL; in btf_check_subprog_call()
9689 prog->aux->func_info_aux[subprog].unreliable = true; in btf_check_subprog_call()
9697 struct bpf_verifier_state *state = env->cur_state, *callback_state; in push_callback_call()
9701 caller = state->frame[state->curframe]; in push_callback_call()
9702 err = btf_check_subprog_call(env, subprog, caller->regs); in push_callback_call()
9703 if (err == -EFAULT) in push_callback_call()
9710 env->subprog_info[subprog].is_cb = true; in push_callback_call()
9712 !is_callback_calling_kfunc(insn->imm)) { in push_callback_call()
9713 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", in push_callback_call()
9714 func_id_name(insn->imm), insn->imm); in push_callback_call()
9715 return -EFAULT; in push_callback_call()
9717 !is_callback_calling_function(insn->imm)) { /* helper */ in push_callback_call()
9718 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", in push_callback_call()
9719 func_id_name(insn->imm), insn->imm); in push_callback_call()
9720 return -EFAULT; in push_callback_call()
9727 env->subprog_info[subprog].is_async_cb = true; in push_callback_call()
9728 async_cb = push_async_cb(env, env->subprog_info[subprog].start, in push_callback_call()
9730 is_bpf_wq_set_callback_impl_kfunc(insn->imm)); in push_callback_call()
9732 return -EFAULT; in push_callback_call()
9733 callee = async_cb->frame[0]; in push_callback_call()
9734 callee->async_entry_cnt = caller->async_entry_cnt + 1; in push_callback_call()
9747 callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); in push_callback_call()
9749 return -ENOMEM; in push_callback_call()
9756 callback_state->callback_unroll_depth++; in push_callback_call()
9757 callback_state->frame[callback_state->curframe - 1]->callback_depth++; in push_callback_call()
9758 caller->callback_depth = 0; in push_callback_call()
9765 struct bpf_verifier_state *state = env->cur_state; in check_func_call()
9769 target_insn = *insn_idx + insn->imm + 1; in check_func_call()
9773 return -EFAULT; in check_func_call()
9776 caller = state->frame[state->curframe]; in check_func_call()
9777 err = btf_check_subprog_call(env, subprog, caller->regs); in check_func_call()
9778 if (err == -EFAULT) in check_func_call()
9784 if (env->cur_state->active_lock.ptr) { in check_func_call()
9787 return -EINVAL; in check_func_call()
9791 if (env->cur_state->active_preempt_lock) { in check_func_call()
9794 return -EINVAL; in check_func_call()
9806 subprog_aux(env, subprog)->called = true; in check_func_call()
9807 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9809 /* All global functions return a 64-bit SCALAR_VALUE */ in check_func_call()
9810 mark_reg_unknown(env, caller->regs, BPF_REG_0); in check_func_call()
9811 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in check_func_call()
9824 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9827 *insn_idx = env->subprog_info[subprog].start - 1; in check_func_call()
9829 if (env->log.level & BPF_LOG_LEVEL) { in check_func_call()
9833 print_verifier_state(env, state->frame[state->curframe], true); in check_func_call()
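/* Illustrative sketch, not part of verifier.c: the two kinds of bpf-to-bpf
 * calls handled above (function names are made up):
 *
 *   static __noinline int scale(int x)    // static subprog: its body is
 *   {                                     // re-verified in the context of
 *           return x * 3;                 // every call site
 *   }
 *
 *   __noinline int global_scale(int x)    // global subprog: verified once
 *   {                                     // against its BTF prototype; at the
 *           return x * 3;                 // call site R0 is treated as an
 *   }                                     // unknown 64-bit scalar
 */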
9848 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
9850 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
9851 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
9852 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9854 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
9855 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
9856 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9859 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
9862 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
9872 /* copy r1 - r5 args that callee can access. The copy includes parent in set_callee_state()
9876 callee->regs[i] = caller->regs[i]; in set_callee_state()
9885 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; in set_map_elem_callback_state()
9890 map = insn_aux->map_ptr_state.map_ptr; in set_map_elem_callback_state()
9891 if (!map->ops->map_set_for_each_callback_args || in set_map_elem_callback_state()
9892 !map->ops->map_for_each_callback) { in set_map_elem_callback_state()
9894 return -ENOTSUPP; in set_map_elem_callback_state()
9897 err = map->ops->map_set_for_each_callback_args(env, caller, callee); in set_map_elem_callback_state()
9901 callee->in_callback_fn = true; in set_map_elem_callback_state()
9902 callee->callback_ret_range = retval_range(0, 1); in set_map_elem_callback_state()
9915 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
9916 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
9919 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
9920 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
9921 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
9923 callee->in_callback_fn = true; in set_loop_callback_state()
9924 callee->callback_ret_range = retval_range(0, 1); in set_loop_callback_state()
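/* Illustrative sketch, not part of verifier.c: a callback shape matching the
 * register setup above (cb and data are made-up names):
 *
 *   static long cb(__u64 i, void *ctx)   // R1 = loop index (scalar), R2 = ctx
 *   {
 *           return 0;                    // must stay within [0, 1]: 0 continues,
 *   }                                    // 1 breaks out of the loop
 *
 *   bpf_loop(100, cb, &data, 0);
 */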
9933 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; in set_timer_callback_state()
9938 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
9939 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
9940 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
9942 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
9943 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
9944 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
9946 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
9947 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
9948 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
9951 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
9952 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
9953 callee->in_async_callback_fn = true; in set_timer_callback_state()
9954 callee->callback_ret_range = retval_range(0, 1); in set_timer_callback_state()
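/* Illustrative sketch, not part of verifier.c: a timer callback matching the
 * register setup above (elem, timer_cb and v are made-up names):
 *
 *   static int timer_cb(void *map, __u32 *key, struct elem *val)
 *   {                                // R1 = CONST_PTR_TO_MAP, R2 = PTR_TO_MAP_KEY,
 *           return 0;                // R3 = PTR_TO_MAP_VALUE, return in [0, 1]
 *   }
 *
 *   bpf_timer_set_callback(&v->t, timer_cb);
 *   bpf_timer_start(&v->t, 1000000, 0);   // fire in 1 ms
 */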
9968 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
9970 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
9971 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
9972 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
9973 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA]; in set_find_vma_callback_state()
9976 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
9979 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
9980 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
9981 callee->in_callback_fn = true; in set_find_vma_callback_state()
9982 callee->callback_ret_range = retval_range(0, 1); in set_find_vma_callback_state()
9995 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
9996 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
9997 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
10000 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
10001 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
10002 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
10004 callee->in_callback_fn = true; in set_user_ringbuf_callback_state()
10005 callee->callback_ret_range = retval_range(0, 1); in set_user_ringbuf_callback_state()
10023 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, in set_rbtree_add_callback_state()
10025 if (!field || !field->graph_root.value_btf_id) in set_rbtree_add_callback_state()
10026 return -EFAULT; in set_rbtree_add_callback_state()
10028 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); in set_rbtree_add_callback_state()
10029 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
10030 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); in set_rbtree_add_callback_state()
10031 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
10033 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
10034 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
10035 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
10036 callee->in_callback_fn = true; in set_rbtree_add_callback_state()
10037 callee->callback_ret_range = retval_range(0, 1); in set_rbtree_add_callback_state()
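/* Illustrative sketch, not part of verifier.c: the rbtree less() callback
 * receives two non-owning references to nodes already in the tree (groot,
 * glock, n and less are made-up names following the selftests' conventions):
 *
 *   static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *   {
 *           // real code compares the containing objects via container_of()
 *           return true;
 *   }
 *
 *   bpf_spin_lock(&glock);
 *   bpf_rbtree_add(&groot, &n->node, less);   // n's ownership moves into the tree
 *   bpf_spin_unlock(&glock);
 */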
10049 struct bpf_verifier_state *state = env->cur_state; in in_rbtree_lock_required_cb()
10050 struct bpf_insn *insn = env->prog->insnsi; in in_rbtree_lock_required_cb()
10054 if (!state->curframe) in in_rbtree_lock_required_cb()
10057 callee = state->frame[state->curframe]; in in_rbtree_lock_required_cb()
10059 if (!callee->in_callback_fn) in in_rbtree_lock_required_cb()
10062 kfunc_btf_id = insn[callee->callsite].imm; in in_rbtree_lock_required_cb()
10070 return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval; in retval_range_within()
10072 return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; in retval_range_within()
10077 struct bpf_verifier_state *state = env->cur_state, *prev_st; in prepare_func_exit()
10083 callee = state->frame[state->curframe]; in prepare_func_exit()
10084 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
10085 if (r0->type == PTR_TO_STACK) { in prepare_func_exit()
10093 return -EINVAL; in prepare_func_exit()
10096 caller = state->frame[state->curframe - 1]; in prepare_func_exit()
10097 if (callee->in_callback_fn) { in prepare_func_exit()
10098 if (r0->type != SCALAR_VALUE) { in prepare_func_exit()
10100 return -EACCES; in prepare_func_exit()
10104 err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64); in prepare_func_exit()
10110 if (!retval_range_within(callee->callback_ret_range, r0, false)) { in prepare_func_exit()
10111 verbose_invalid_scalar(env, r0, callee->callback_ret_range, in prepare_func_exit()
10113 return -EINVAL; in prepare_func_exit()
10115 if (!calls_callback(env, callee->callsite)) { in prepare_func_exit()
10117 *insn_idx, callee->callsite); in prepare_func_exit()
10118 return -EFAULT; in prepare_func_exit()
10122 caller->regs[BPF_REG_0] = *r0; in prepare_func_exit()
10130 if (!callee->in_callback_fn) { in prepare_func_exit()
10141 in_callback_fn = callee->in_callback_fn; in prepare_func_exit()
10143 *insn_idx = callee->callsite; in prepare_func_exit()
10145 *insn_idx = callee->callsite + 1; in prepare_func_exit()
10147 if (env->log.level & BPF_LOG_LEVEL) { in prepare_func_exit()
10156 state->frame[state->curframe--] = NULL; in prepare_func_exit()
10161 * void cb(int idx, struct ctx *ctx) { ctx->i++; ... } in prepare_func_exit()
10194 ret_reg->smax_value = meta->msize_max_value; in do_refine_retval_range()
10195 ret_reg->s32_max_value = meta->msize_max_value; in do_refine_retval_range()
10196 ret_reg->smin_value = -MAX_ERRNO; in do_refine_retval_range()
10197 ret_reg->s32_min_value = -MAX_ERRNO; in do_refine_retval_range()
10201 ret_reg->umax_value = nr_cpu_ids - 1; in do_refine_retval_range()
10202 ret_reg->u32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
10203 ret_reg->smax_value = nr_cpu_ids - 1; in do_refine_retval_range()
10204 ret_reg->s32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
10205 ret_reg->umin_value = 0; in do_refine_retval_range()
10206 ret_reg->u32_min_value = 0; in do_refine_retval_range()
10207 ret_reg->smin_value = 0; in do_refine_retval_range()
10208 ret_reg->s32_min_value = 0; in do_refine_retval_range()
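/* Illustrative sketch, not part of verifier.c: the refinement above is what
 * lets a later bounds check on the return value succeed (buf is a made-up name):
 *
 *   char buf[64];
 *   long n = bpf_get_stack(ctx, buf, sizeof(buf), 0);
 *   // after the call the verifier already knows n <= sizeof(buf) and
 *   // n >= -MAX_ERRNO, so a subsequent "if (n > 0)" bounds n to [1, 64]
 */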
10220 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
10221 struct bpf_map *map = meta->map_ptr; in record_func_map()
10237 return -EINVAL; in record_func_map()
10240 /* In case of read-only, some additional restrictions in record_func_map()
10244 if ((map->map_flags & BPF_F_RDONLY_PROG) && in record_func_map()
10250 return -EACCES; in record_func_map()
10253 if (!aux->map_ptr_state.map_ptr) in record_func_map()
10254 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
10255 !meta->map_ptr->bypass_spec_v1, false); in record_func_map()
10256 else if (aux->map_ptr_state.map_ptr != meta->map_ptr) in record_func_map()
10257 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
10258 !meta->map_ptr->bypass_spec_v1, true); in record_func_map()
10266 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_key()
10268 struct bpf_map *map = meta->map_ptr; in record_func_key()
10274 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { in record_func_key()
10276 return -EINVAL; in record_func_key()
10280 val = reg->var_off.value; in record_func_key()
10281 max = map->max_entries; in record_func_key()
10305 if (!exception_exit && state->frameno && !state->in_callback_fn) in check_reference_leak()
10308 for (i = 0; i < state->acquired_refs; i++) { in check_reference_leak()
10309 if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno) in check_reference_leak()
10312 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
10315 return refs_lingering ? -EINVAL : 0; in check_reference_leak()
10323 struct bpf_map *fmt_map = fmt_reg->map_ptr; in check_bpf_snprintf_call()
10330 if (data_len_reg->var_off.value % 8) in check_bpf_snprintf_call()
10331 return -EINVAL; in check_bpf_snprintf_call()
10332 num_args = data_len_reg->var_off.value / 8; in check_bpf_snprintf_call()
10337 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; in check_bpf_snprintf_call()
10338 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, in check_bpf_snprintf_call()
10342 return -EFAULT; in check_bpf_snprintf_call()
10358 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_get_func_ip()
10362 if (!bpf_prog_has_trampoline(env->prog)) { in check_get_func_ip()
10365 return -ENOTSUPP; in check_get_func_ip()
10374 return -ENOTSUPP; in check_get_func_ip()
10379 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
10396 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; in update_loop_inline_state()
10398 if (!state->initialized) { in update_loop_inline_state()
10399 state->initialized = 1; in update_loop_inline_state()
10400 state->fit_for_inline = loop_flag_is_zero(env); in update_loop_inline_state()
10401 state->callback_subprogno = subprogno; in update_loop_inline_state()
10405 if (!state->fit_for_inline) in update_loop_inline_state()
10408 state->fit_for_inline = (loop_flag_is_zero(env) && in update_loop_inline_state()
10409 state->callback_subprogno == subprogno); in update_loop_inline_state()
10416 return -ERANGE; in get_helper_proto()
10418 if (!env->ops->get_func_proto) in get_helper_proto()
10419 return -EINVAL; in get_helper_proto()
10421 *ptr = env->ops->get_func_proto(func_id, env->prog); in get_helper_proto()
10422 return *ptr ? 0 : -EINVAL; in get_helper_proto()
10428 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_helper_call()
10440 func_id = insn->imm; in check_helper_call()
10441 err = get_helper_proto(env, insn->imm, &fn); in check_helper_call()
10442 if (err == -ERANGE) { in check_helper_call()
10444 return -EINVAL; in check_helper_call()
10453 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in check_helper_call()
10454 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
10455 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); in check_helper_call()
10456 return -EINVAL; in check_helper_call()
10459 if (fn->allowed && !fn->allowed(env->prog)) { in check_helper_call()
10461 return -EINVAL; in check_helper_call()
10464 if (!in_sleepable(env) && fn->might_sleep) { in check_helper_call()
10465 verbose(env, "helper call might sleep in a non-sleepable prog\n"); in check_helper_call()
10466 return -EINVAL; in check_helper_call()
10470 changes_data = bpf_helper_changes_pkt_data(fn->func); in check_helper_call()
10471 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { in check_helper_call()
10474 return -EINVAL; in check_helper_call()
10478 meta.pkt_access = fn->pkt_access; in check_helper_call()
10487 if (env->cur_state->active_rcu_lock) { in check_helper_call()
10488 if (fn->might_sleep) { in check_helper_call()
10491 return -EINVAL; in check_helper_call()
10495 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
10498 if (env->cur_state->active_preempt_lock) { in check_helper_call()
10499 if (fn->might_sleep) { in check_helper_call()
10500 verbose(env, "sleepable helper %s#%d in non-preemptible region\n", in check_helper_call()
10502 return -EINVAL; in check_helper_call()
10506 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
10530 BPF_WRITE, -1, false, false); in check_helper_call()
10538 err = -EINVAL; in check_helper_call()
10543 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) { in check_helper_call()
10546 return -EFAULT; in check_helper_call()
10557 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in check_helper_call()
10558 if (reg->ref_obj_id == ref_obj_id) { in check_helper_call()
10559 if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) { in check_helper_call()
10560 reg->ref_obj_id = 0; in check_helper_call()
10561 reg->type &= ~MEM_ALLOC; in check_helper_call()
10562 reg->type |= MEM_RCU; in check_helper_call()
10597 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); in check_helper_call()
10598 return -EINVAL; in check_helper_call()
10624 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { in check_helper_call()
10628 cur_func(env)->callback_depth = 0; in check_helper_call()
10629 if (env->log.level & BPF_LOG_LEVEL2) in check_helper_call()
10631 env->cur_state->curframe); in check_helper_call()
10638 return -EACCES; in check_helper_call()
10643 env->prog->expected_attach_type == BPF_LSM_CGROUP) { in check_helper_call()
10644 if (!env->prog->aux->attach_func_proto->type) { in check_helper_call()
10649 return -EINVAL; in check_helper_call()
10660 return -EFAULT; in check_helper_call()
10665 return -EFAULT; in check_helper_call()
10669 return -EFAULT; in check_helper_call()
10696 return -EFAULT; in check_helper_call()
10700 return -EFAULT; in check_helper_call()
10716 if (reg->type & MEM_RCU) { in check_helper_call()
10717 type = btf_type_by_id(reg->btf, reg->btf_id); in check_helper_call()
10720 return -EFAULT; in check_helper_call()
10723 env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true; in check_helper_call()
10742 /* helper call returns 64-bit value. */ in check_helper_call()
10746 ret_type = fn->ret_type; in check_helper_call()
10767 return -EINVAL; in check_helper_call()
10773 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) { in check_helper_call()
10774 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10808 tname = btf_name_by_offset(meta.ret_btf, t->name_off); in check_helper_call()
10811 return -EINVAL; in check_helper_call()
10841 ret_btf = meta.kptr_field->kptr.btf; in check_helper_call()
10842 ret_btf_id = meta.kptr_field->kptr.btf_id; in check_helper_call()
10845 if (meta.kptr_field->type == BPF_KPTR_PERCPU) in check_helper_call()
10849 if (fn->ret_btf_id == BPF_PTR_POISON) { in check_helper_call()
10851 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", in check_helper_call()
10853 return -EINVAL; in check_helper_call()
10856 ret_btf_id = *fn->ret_btf_id; in check_helper_call()
10862 return -EINVAL; in check_helper_call()
10871 return -EINVAL; in check_helper_call()
10875 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10880 return -EFAULT; in check_helper_call()
10900 err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); in check_helper_call()
10910 !env->prog->has_callchain_buf) { in check_helper_call()
10917 err = -ENOTSUPP; in check_helper_call()
10925 env->prog->has_callchain_buf = true; in check_helper_call()
10929 env->prog->call_get_stack = true; in check_helper_call()
10933 return -ENOTSUPP; in check_helper_call()
10934 env->prog->call_get_func_ip = true; in check_helper_call()
10952 reg->live |= REG_LIVE_WRITTEN; in mark_btf_func_reg_size()
10953 reg->subreg_def = reg_size == sizeof(u64) ? in mark_btf_func_reg_size()
10954 DEF_NOT_SUBREG : env->insn_idx + 1; in mark_btf_func_reg_size()
10959 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in mark_btf_func_reg_size()
10961 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); in mark_btf_func_reg_size()
10968 return meta->kfunc_flags & KF_ACQUIRE; in is_kfunc_acquire()
10973 return meta->kfunc_flags & KF_RELEASE; in is_kfunc_release()
10978 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); in is_kfunc_trusted_args()
10983 return meta->kfunc_flags & KF_SLEEPABLE; in is_kfunc_sleepable()
10988 return meta->kfunc_flags & KF_DESTRUCTIVE; in is_kfunc_destructive()
10993 return meta->kfunc_flags & KF_RCU; in is_kfunc_rcu()
10998 return meta->kfunc_flags & KF_RCU_PROTECTED; in is_kfunc_rcu_protected()
11007 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_mem_size()
11008 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_mem_size()
11020 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_const_mem_size()
11021 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_const_mem_size()
11079 param_name = btf_name_by_offset(btf, arg->name_off); in is_kfunc_arg_scalar_with_name()
11114 t = btf_type_skip_modifiers(btf, arg->type, NULL); in BTF_ID()
11119 t = btf_type_skip_modifiers(btf, t->type, &res_id); in BTF_ID()
11160 t = btf_type_resolve_func_ptr(btf, arg->type, NULL); in is_kfunc_arg_callback()
11182 member_type = btf_type_skip_modifiers(btf, member->type, NULL); in __btf_type_is_scalar_struct()
11194 if (!array->nelems) in __btf_type_is_scalar_struct()
11196 member_type = btf_type_skip_modifiers(btf, array->type, NULL); in __btf_type_is_scalar_struct()
11323 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in BTF_ID()
11324 meta->arg_owning_ref) { in BTF_ID()
11328 return meta->kfunc_flags & KF_RET_NULL; in BTF_ID()
11333 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock]; in is_kfunc_bpf_rcu_read_lock()
11338 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; in is_kfunc_bpf_rcu_read_unlock()
11343 return meta->func_id == special_kfunc_list[KF_bpf_preempt_disable]; in is_kfunc_bpf_preempt_disable()
11348 return meta->func_id == special_kfunc_list[KF_bpf_preempt_enable]; in is_kfunc_bpf_preempt_enable()
11363 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) in get_kfunc_ptr_arg_type()
11371 if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) in get_kfunc_ptr_arg_type()
11374 if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) in get_kfunc_ptr_arg_type()
11377 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11380 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11383 if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11389 if (is_kfunc_arg_list_head(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11392 if (is_kfunc_arg_list_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11395 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11398 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11401 if (is_kfunc_arg_const_str(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11404 if (is_kfunc_arg_map(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11407 if (is_kfunc_arg_wq(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11410 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { in get_kfunc_ptr_arg_type()
11413 meta->func_name, argno, btf_type_str(ref_t), ref_tname); in get_kfunc_ptr_arg_type()
11414 return -EINVAL; in get_kfunc_ptr_arg_type()
11419 if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11423 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) || in get_kfunc_ptr_arg_type()
11424 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))) in get_kfunc_ptr_arg_type()
11432 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && in get_kfunc_ptr_arg_type()
11436 return -EINVAL; in get_kfunc_ptr_arg_type()
11456 if (base_type(reg->type) == PTR_TO_BTF_ID) { in process_kf_arg_ptr_to_btf_id()
11457 reg_btf = reg->btf; in process_kf_arg_ptr_to_btf_id()
11458 reg_ref_id = reg->btf_id; in process_kf_arg_ptr_to_btf_id()
11461 reg_ref_id = *reg2btf_ids[base_type(reg->type)]; in process_kf_arg_ptr_to_btf_id()
11465 * or releasing a reference, or are no-cast aliases. We do _not_ in process_kf_arg_ptr_to_btf_id()
11488 if ((is_kfunc_release(meta) && reg->ref_obj_id) || in process_kf_arg_ptr_to_btf_id()
11489 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) in process_kf_arg_ptr_to_btf_id()
11493 (reg->off || !tnum_is_const(reg->var_off) || in process_kf_arg_ptr_to_btf_id()
11494 reg->var_off.value)); in process_kf_arg_ptr_to_btf_id()
11497 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); in process_kf_arg_ptr_to_btf_id()
11498 …struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, st… in process_kf_arg_ptr_to_btf_id()
11500 * actually use it -- it must cast to the underlying type. So we allow in process_kf_arg_ptr_to_btf_id()
11506 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, in process_kf_arg_ptr_to_btf_id()
11508 return -EINVAL; in process_kf_arg_ptr_to_btf_id()
11515 struct bpf_verifier_state *state = env->cur_state; in ref_set_non_owning()
11518 if (!state->active_lock.ptr) { in ref_set_non_owning()
11520 return -EFAULT; in ref_set_non_owning()
11523 if (type_flag(reg->type) & NON_OWN_REF) { in ref_set_non_owning()
11525 return -EFAULT; in ref_set_non_owning()
11528 reg->type |= NON_OWN_REF; in ref_set_non_owning()
11529 if (rec->refcount_off >= 0) in ref_set_non_owning()
11530 reg->type |= MEM_RCU; in ref_set_non_owning()
11545 "owning -> non-owning conversion\n"); in ref_convert_owning_non_owning()
11546 return -EFAULT; in ref_convert_owning_non_owning()
11549 for (i = 0; i < state->acquired_refs; i++) { in ref_convert_owning_non_owning()
11550 if (state->refs[i].id != ref_obj_id) in ref_convert_owning_non_owning()
11556 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in ref_convert_owning_non_owning()
11557 if (reg->ref_obj_id == ref_obj_id) { in ref_convert_owning_non_owning()
11558 reg->ref_obj_id = 0; in ref_convert_owning_non_owning()
11566 return -EFAULT; in ref_convert_owning_non_owning()
11578 * allocation, the verifier preserves a unique reg->id for it.
11590 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
11591 * allocated objects is the reg->btf pointer.
11593 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
11604 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
11606 * will get different reg->id assigned to each lookup, hence different
11609 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
11610 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
11611 * returned from bpf_obj_new. Each allocation receives a new reg->id.
11618 switch ((int)reg->type) { in check_reg_allocation_locked()
11620 ptr = reg->map_ptr; in check_reg_allocation_locked()
11623 ptr = reg->btf; in check_reg_allocation_locked()
11627 return -EFAULT; in check_reg_allocation_locked()
11629 id = reg->id; in check_reg_allocation_locked()
11631 if (!env->cur_state->active_lock.ptr) in check_reg_allocation_locked()
11632 return -EINVAL; in check_reg_allocation_locked()
11633 if (env->cur_state->active_lock.ptr != ptr || in check_reg_allocation_locked()
11634 env->cur_state->active_lock.id != id) { in check_reg_allocation_locked()
11636 return -EINVAL; in check_reg_allocation_locked()
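/* Editor's illustration, not part of verifier.c: a minimal BPF-side sketch of the
 * (active_lock.ptr, active_lock.id) pairing that the comment and
 * check_reg_allocation_locked() above describe, assuming a libbpf-style array map
 * ("counters") whose value embeds a struct bpf_spin_lock. The unlock must operate
 * on the same map value whose lookup produced the locked register.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val {
	int cnt;
	struct bpf_spin_lock lock;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val);
} counters SEC(".maps");

SEC("tc")
int lock_id_example(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct val *v = bpf_map_lookup_elem(&counters, &key);

	if (!v)
		return 0;
	bpf_spin_lock(&v->lock);	/* active_lock.ptr = map, active_lock.id = v's reg->id */
	v->cnt++;
	bpf_spin_unlock(&v->lock);	/* must match the same (ptr, id) pair */
	return 0;
}

char _license[] SEC("license") = "GPL";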
11674 return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && in is_bpf_throw_kfunc()
11675 insn->imm == special_kfunc_list[KF_bpf_throw]; in is_bpf_throw_kfunc()
11758 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_root()
11760 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
11763 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_root()
11764 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
11767 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_root()
11771 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
11775 head_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_root()
11779 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
11785 rec->spin_lock_off, head_type_name); in __process_kf_arg_ptr_to_graph_root()
11786 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
11791 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
11802 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_head()
11810 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_root()
11826 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_node()
11828 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
11831 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_node()
11832 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
11835 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_node()
11839 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11842 node_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_node()
11846 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11851 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id); in __process_kf_arg_ptr_to_graph_node()
11852 t = btf_type_by_id(reg->btf, reg->btf_id); in __process_kf_arg_ptr_to_graph_node()
11853 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, in __process_kf_arg_ptr_to_graph_node()
11854 field->graph_root.value_btf_id, true)) { in __process_kf_arg_ptr_to_graph_node()
11859 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
11860 btf_name_by_offset(field->graph_root.btf, et->name_off), in __process_kf_arg_ptr_to_graph_node()
11861 node_off, btf_name_by_offset(reg->btf, t->name_off)); in __process_kf_arg_ptr_to_graph_node()
11862 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11864 meta->arg_btf = reg->btf; in __process_kf_arg_ptr_to_graph_node()
11865 meta->arg_btf_id = reg->btf_id; in __process_kf_arg_ptr_to_graph_node()
11867 if (node_off != field->graph_root.node_offset) { in __process_kf_arg_ptr_to_graph_node()
11870 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
11871 btf_name_by_offset(field->graph_root.btf, et->name_off)); in __process_kf_arg_ptr_to_graph_node()
11872 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
11884 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_node()
11893 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_node()
11898 * LSM hooks and iters (both sleepable and non-sleepable) are safe.
11904 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_css_task_iter_allowlist()
11910 if (env->prog->expected_attach_type == BPF_TRACE_ITER) in check_css_task_iter_allowlist()
11921 const char *func_name = meta->func_name, *ref_tname; in check_kfunc_args()
11922 const struct btf *btf = meta->btf; in check_kfunc_args()
11928 args = (const struct btf_param *)(meta->func_proto + 1); in check_kfunc_args()
11929 nargs = btf_type_vlen(meta->func_proto); in check_kfunc_args()
11933 return -EINVAL; in check_kfunc_args()
11953 if (reg->type != SCALAR_VALUE) { in check_kfunc_args()
11955 return -EINVAL; in check_kfunc_args()
11958 if (is_kfunc_arg_constant(meta->btf, &args[i])) { in check_kfunc_args()
11959 if (meta->arg_constant.found) { in check_kfunc_args()
11961 return -EFAULT; in check_kfunc_args()
11963 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
11965 return -EINVAL; in check_kfunc_args()
11970 meta->arg_constant.found = true; in check_kfunc_args()
11971 meta->arg_constant.value = reg->var_off.value; in check_kfunc_args()
11973 meta->r0_rdonly = true; in check_kfunc_args()
11980 if (meta->r0_size) { in check_kfunc_args()
11982 return -EINVAL; in check_kfunc_args()
11985 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
11987 return -EINVAL; in check_kfunc_args()
11990 meta->r0_size = reg->var_off.value; in check_kfunc_args()
12000 return -EINVAL; in check_kfunc_args()
12004 (register_is_null(reg) || type_may_be_null(reg->type)) && in check_kfunc_args()
12005 !is_kfunc_arg_nullable(meta->btf, &args[i])) { in check_kfunc_args()
12007 return -EACCES; in check_kfunc_args()
12010 if (reg->ref_obj_id) { in check_kfunc_args()
12011 if (is_kfunc_release(meta) && meta->ref_obj_id) { in check_kfunc_args()
12013 regno, reg->ref_obj_id, in check_kfunc_args()
12014 meta->ref_obj_id); in check_kfunc_args()
12015 return -EFAULT; in check_kfunc_args()
12017 meta->ref_obj_id = reg->ref_obj_id; in check_kfunc_args()
12019 meta->release_regno = regno; in check_kfunc_args()
12022 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); in check_kfunc_args()
12023 ref_tname = btf_name_by_offset(btf, ref_t->name_off); in check_kfunc_args()
12033 if (!reg->map_ptr) { in check_kfunc_args()
12035 return -EINVAL; in check_kfunc_args()
12037 if (meta->map.ptr && reg->map_ptr->record->wq_off >= 0) { in check_kfunc_args()
12050 if (meta->map.ptr != reg->map_ptr || in check_kfunc_args()
12051 meta->map.uid != reg->map_uid) { in check_kfunc_args()
12054 meta->map.uid, reg->map_uid); in check_kfunc_args()
12055 return -EINVAL; in check_kfunc_args()
12058 meta->map.ptr = reg->map_ptr; in check_kfunc_args()
12059 meta->map.uid = reg->map_uid; in check_kfunc_args()
12069 return -EINVAL; in check_kfunc_args()
12073 return -EINVAL; in check_kfunc_args()
12093 return -EFAULT; in check_kfunc_args()
12096 if (is_kfunc_release(meta) && reg->ref_obj_id) in check_kfunc_args()
12104 if (reg->type != PTR_TO_CTX) { in check_kfunc_args()
12106 i, reg_type_str(env, reg->type)); in check_kfunc_args()
12107 return -EINVAL; in check_kfunc_args()
12110 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { in check_kfunc_args()
12111 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); in check_kfunc_args()
12113 return -EINVAL; in check_kfunc_args()
12114 meta->ret_btf_id = ret; in check_kfunc_args()
12118 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12119 if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { in check_kfunc_args()
12121 return -EINVAL; in check_kfunc_args()
12123 } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { in check_kfunc_args()
12124 if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { in check_kfunc_args()
12126 return -EINVAL; in check_kfunc_args()
12130 return -EINVAL; in check_kfunc_args()
12132 if (!reg->ref_obj_id) { in check_kfunc_args()
12134 return -EINVAL; in check_kfunc_args()
12136 if (meta->btf == btf_vmlinux) { in check_kfunc_args()
12137 meta->arg_btf = reg->btf; in check_kfunc_args()
12138 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
12146 if (reg->type == CONST_PTR_TO_DYNPTR) in check_kfunc_args()
12152 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { in check_kfunc_args()
12154 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { in check_kfunc_args()
12156 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && in check_kfunc_args()
12158 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; in check_kfunc_args()
12162 return -EFAULT; in check_kfunc_args()
12166 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; in check_kfunc_args()
12169 return -EFAULT; in check_kfunc_args()
12184 meta->initialized_dynptr.id = id; in check_kfunc_args()
12185 meta->initialized_dynptr.type = dynptr_get_type(env, reg); in check_kfunc_args()
12186 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); in check_kfunc_args()
12192 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { in check_kfunc_args()
12195 return -EINVAL; in check_kfunc_args()
12203 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
12204 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12206 return -EINVAL; in check_kfunc_args()
12208 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
12210 return -EINVAL; in check_kfunc_args()
12217 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
12218 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12220 return -EINVAL; in check_kfunc_args()
12222 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
12224 return -EINVAL; in check_kfunc_args()
12231 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12233 return -EINVAL; in check_kfunc_args()
12235 if (!reg->ref_obj_id) { in check_kfunc_args()
12237 return -EINVAL; in check_kfunc_args()
12244 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) { in check_kfunc_args()
12245 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) { in check_kfunc_args()
12246 verbose(env, "rbtree_remove node input must be non-owning ref\n"); in check_kfunc_args()
12247 return -EINVAL; in check_kfunc_args()
12251 return -EINVAL; in check_kfunc_args()
12254 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12256 return -EINVAL; in check_kfunc_args()
12258 if (!reg->ref_obj_id) { in check_kfunc_args()
12260 return -EINVAL; in check_kfunc_args()
12272 ref_tname = btf_name_by_offset(btf, ref_t->name_off); in check_kfunc_args()
12276 if ((base_type(reg->type) != PTR_TO_BTF_ID || in check_kfunc_args()
12277 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && in check_kfunc_args()
12278 !reg2btf_ids[base_type(reg->type)]) { in check_kfunc_args()
12279 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); in check_kfunc_args()
12281 reg_type_str(env, base_type(reg->type) | in check_kfunc_args()
12282 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); in check_kfunc_args()
12283 return -EINVAL; in check_kfunc_args()
12294 return -EINVAL; in check_kfunc_args()
12307 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) { in check_kfunc_args()
12315 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { in check_kfunc_args()
12316 if (meta->arg_constant.found) { in check_kfunc_args()
12318 return -EFAULT; in check_kfunc_args()
12320 if (!tnum_is_const(size_reg->var_off)) { in check_kfunc_args()
12322 return -EINVAL; in check_kfunc_args()
12324 meta->arg_constant.found = true; in check_kfunc_args()
12325 meta->arg_constant.value = size_reg->var_off.value; in check_kfunc_args()
12333 if (reg->type != PTR_TO_FUNC) { in check_kfunc_args()
12335 return -EINVAL; in check_kfunc_args()
12337 meta->subprogno = reg->subprogno; in check_kfunc_args()
12340 if (!type_is_ptr_alloc_obj(reg->type)) { in check_kfunc_args()
12341 verbose(env, "arg#%d is neither owning or non-owning ref\n", i); in check_kfunc_args()
12342 return -EINVAL; in check_kfunc_args()
12344 if (!type_is_non_owning_ref(reg->type)) in check_kfunc_args()
12345 meta->arg_owning_ref = true; in check_kfunc_args()
12350 return -EFAULT; in check_kfunc_args()
12353 if (rec->refcount_off < 0) { in check_kfunc_args()
12355 return -EINVAL; in check_kfunc_args()
12358 meta->arg_btf = reg->btf; in check_kfunc_args()
12359 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
12362 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
12364 return -EINVAL; in check_kfunc_args()
12371 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
12373 return -EINVAL; in check_kfunc_args()
12382 if (is_kfunc_release(meta) && !meta->release_regno) { in check_kfunc_args()
12385 return -EINVAL; in check_kfunc_args()
12404 if (!insn->imm) in fetch_kfunc_meta()
12405 return -EINVAL; in fetch_kfunc_meta()
12407 desc_btf = find_kfunc_desc_btf(env, insn->off); in fetch_kfunc_meta()
12411 func_id = insn->imm; in fetch_kfunc_meta()
12413 func_name = btf_name_by_offset(desc_btf, func->name_off); in fetch_kfunc_meta()
12416 func_proto = btf_type_by_id(desc_btf, func->type); in fetch_kfunc_meta()
12418 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); in fetch_kfunc_meta()
12420 return -EACCES; in fetch_kfunc_meta()
12424 meta->btf = desc_btf; in fetch_kfunc_meta()
12425 meta->func_id = func_id; in fetch_kfunc_meta()
12426 meta->kfunc_flags = *kfunc_flags; in fetch_kfunc_meta()
12427 meta->func_proto = func_proto; in fetch_kfunc_meta()
12428 meta->func_name = func_name; in fetch_kfunc_meta()
12451 if (!insn->imm) in check_kfunc_call()
12455 if (err == -EACCES && func_name) in check_kfunc_call()
12460 insn_aux = &env->insn_aux_data[insn_idx]; in check_kfunc_call()
12462 insn_aux->is_iter_next = is_iter_next_kfunc(&meta); in check_kfunc_call()
12466 return -EACCES; in check_kfunc_call()
12472 return -EACCES; in check_kfunc_call()
12511 if (env->cur_state->active_rcu_lock) { in check_kfunc_call()
12518 return -EACCES; in check_kfunc_call()
12523 return -EINVAL; in check_kfunc_call()
12525 bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ in check_kfunc_call()
12526 if (reg->type & MEM_RCU) { in check_kfunc_call()
12527 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); in check_kfunc_call()
12528 reg->type |= PTR_UNTRUSTED; in check_kfunc_call()
12531 env->cur_state->active_rcu_lock = false; in check_kfunc_call()
12534 return -EACCES; in check_kfunc_call()
12537 env->cur_state->active_rcu_lock = true; in check_kfunc_call()
12540 return -EINVAL; in check_kfunc_call()
12543 if (env->cur_state->active_preempt_lock) { in check_kfunc_call()
12545 env->cur_state->active_preempt_lock++; in check_kfunc_call()
12547 env->cur_state->active_preempt_lock--; in check_kfunc_call()
12549 verbose(env, "kernel func %s is sleepable within non-preemptible region\n", func_name); in check_kfunc_call()
12550 return -EACCES; in check_kfunc_call()
12553 env->cur_state->active_preempt_lock++; in check_kfunc_call()
12556 return -EINVAL; in check_kfunc_call()
12575 insn_aux->insert_off = regs[BPF_REG_2].off; in check_kfunc_call()
12576 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); in check_kfunc_call()
12579 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", in check_kfunc_call()
12596 return -ENOTSUPP; in check_kfunc_call()
12598 env->seen_exception = true; in check_kfunc_call()
12603 if (!env->exception_callback_subprog) { in check_kfunc_call()
12614 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); in check_kfunc_call()
12623 return -EINVAL; in check_kfunc_call()
12629 mark_btf_func_reg_size(env, BPF_REG_0, t->size); in check_kfunc_call()
12631 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); in check_kfunc_call()
12641 return -ENOMEM; in check_kfunc_call()
12645 return -EINVAL; in check_kfunc_call()
12648 ret_btf = env->prog->aux->btf; in check_kfunc_call()
12654 return -EINVAL; in check_kfunc_call()
12660 return -EINVAL; in check_kfunc_call()
12664 if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { in check_kfunc_call()
12666 ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE); in check_kfunc_call()
12667 return -EINVAL; in check_kfunc_call()
12686 err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size); in check_kfunc_call()
12696 return -EINVAL; in check_kfunc_call()
12701 return -EINVAL; in check_kfunc_call()
12712 insn_aux->obj_new_size = ret_t->size; in check_kfunc_call()
12713 insn_aux->kptr_struct_meta = struct_meta; in check_kfunc_call()
12720 insn_aux->kptr_struct_meta = in check_kfunc_call()
12727 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
12732 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
12743 return -EINVAL; in check_kfunc_call()
12758 return -EFAULT; in check_kfunc_call()
12769 /* this will set env->seen_direct_write to true */ in check_kfunc_call()
12772 return -EINVAL; in check_kfunc_call()
12778 return -EFAULT; in check_kfunc_call()
12789 return -EFAULT; in check_kfunc_call()
12805 ptr_type->name_off); in check_kfunc_call()
12811 return -EINVAL; in check_kfunc_call()
12833 cur_iter = get_iter_from_state(env->cur_state, &meta); in check_kfunc_call()
12835 if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */ in check_kfunc_call()
12845 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12861 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12866 insn_aux->kptr_struct_meta = in check_kfunc_call()
12883 mark_btf_func_reg_size(env, regno, t->size); in check_kfunc_call()
12899 bool known = tnum_is_const(reg->var_off); in check_reg_sane_offset()
12900 s64 val = reg->var_off.value; in check_reg_sane_offset()
12901 s64 smin = reg->smin_value; in check_reg_sane_offset()
12903 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { in check_reg_sane_offset()
12909 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
12911 reg_type_str(env, type), reg->off); in check_reg_sane_offset()
12921 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
12931 REASON_BOUNDS = -1,
12932 REASON_TYPE = -2,
12933 REASON_PATHS = -3,
12934 REASON_LIMIT = -4,
12935 REASON_STACK = -5,
12943 switch (ptr_reg->type) { in retrieve_ptr_limit()
12945 /* Offset 0 is out-of-bounds, but acceptable start for the in retrieve_ptr_limit()
12951 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); in retrieve_ptr_limit()
12954 max = ptr_reg->map_ptr->value_size; in retrieve_ptr_limit()
12956 ptr_reg->smin_value : in retrieve_ptr_limit()
12957 ptr_reg->umax_value) + ptr_reg->off; in retrieve_ptr_limit()
12972 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; in can_skip_alu_sanitation()
12981 if (aux->alu_state && in update_alu_sanitation_state()
12982 (aux->alu_state != alu_state || in update_alu_sanitation_state()
12983 aux->alu_limit != alu_limit)) in update_alu_sanitation_state()
12987 aux->alu_state = alu_state; in update_alu_sanitation_state()
12988 aux->alu_limit = alu_limit; in update_alu_sanitation_state()
13023 regs = branch->frame[branch->curframe]->regs; in sanitize_speculative_path()
13024 if (BPF_SRC(insn->code) == BPF_K) { in sanitize_speculative_path()
13025 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
13026 } else if (BPF_SRC(insn->code) == BPF_X) { in sanitize_speculative_path()
13027 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
13028 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
13042 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; in sanitize_ptr_alu()
13043 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_ptr_alu()
13044 bool off_is_imm = tnum_is_const(off_reg->var_off); in sanitize_ptr_alu()
13045 bool off_is_neg = off_reg->smin_value < 0; in sanitize_ptr_alu()
13047 u8 opcode = BPF_OP(insn->code); in sanitize_ptr_alu()
13056 /* We already marked aux for masking from non-speculative in sanitize_ptr_alu()
13060 if (vstate->speculative) in sanitize_ptr_alu()
13064 if (!tnum_is_const(off_reg->var_off) && in sanitize_ptr_alu()
13065 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) in sanitize_ptr_alu()
13068 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || in sanitize_ptr_alu()
13072 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); in sanitize_ptr_alu()
13080 alu_state = info->aux.alu_state; in sanitize_ptr_alu()
13081 alu_limit = abs(info->aux.alu_limit - alu_limit); in sanitize_ptr_alu()
13092 env->explore_alu_limits = true; in sanitize_ptr_alu()
13103 * Also, when register is a known constant, we rewrite register-based in sanitize_ptr_alu()
13104 * operation to immediate-based, and thus do not need masking (and as in sanitize_ptr_alu()
13105 * a consequence, do not need to simulate the zero-truncation either). in sanitize_ptr_alu()
13110 /* Simulate and find potential out-of-bounds access under in sanitize_ptr_alu()
13114 * to simulate dst (== 0) +/-= ptr. Needed, for example, in sanitize_ptr_alu()
13115 * for cases where we use K-based arithmetic in one direction in sanitize_ptr_alu()
13116 * and truncated reg-based in the other in order to explore in sanitize_ptr_alu()
13123 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, in sanitize_ptr_alu()
13124 env->insn_idx); in sanitize_ptr_alu()
13132 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_mark_insn_seen()
13136 * the non-speculative domain, sanitize_dead_code() can still in sanitize_mark_insn_seen()
13139 if (!vstate->speculative) in sanitize_mark_insn_seen()
13140 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in sanitize_mark_insn_seen()
13149 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; in sanitize_err()
13150 u32 dst = insn->dst_reg, src = insn->src_reg; in sanitize_err()
13179 return -EACCES; in sanitize_err()
13190 * 'off' includes 'reg->off'.
13198 if (!tnum_is_const(reg->var_off)) { in check_stack_access_for_ptr_arithmetic()
13201 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_for_ptr_arithmetic()
13204 return -EACCES; in check_stack_access_for_ptr_arithmetic()
13207 if (off >= 0 || off < -MAX_BPF_STACK) { in check_stack_access_for_ptr_arithmetic()
13210 return -EACCES; in check_stack_access_for_ptr_arithmetic()
13220 u32 dst = insn->dst_reg; in sanitize_check_bounds()
13225 if (env->bypass_spec_v1) in sanitize_check_bounds()
13228 switch (dst_reg->type) { in sanitize_check_bounds()
13231 dst_reg->off + dst_reg->var_off.value)) in sanitize_check_bounds()
13232 return -EACCES; in sanitize_check_bounds()
13235 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { in sanitize_check_bounds()
13238 return -EACCES; in sanitize_check_bounds()
13250 * If we return -EACCES, caller may want to try again treating pointer as a
13251 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
13258 struct bpf_verifier_state *vstate = env->cur_state; in adjust_ptr_min_max_vals()
13259 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_ptr_min_max_vals()
13260 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals()
13261 bool known = tnum_is_const(off_reg->var_off); in adjust_ptr_min_max_vals()
13262 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, in adjust_ptr_min_max_vals()
13263 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; in adjust_ptr_min_max_vals()
13264 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, in adjust_ptr_min_max_vals()
13265 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; in adjust_ptr_min_max_vals()
13267 u8 opcode = BPF_OP(insn->code); in adjust_ptr_min_max_vals()
13268 u32 dst = insn->dst_reg; in adjust_ptr_min_max_vals()
13282 if (BPF_CLASS(insn->code) != BPF_ALU64) { in adjust_ptr_min_max_vals()
13283 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ in adjust_ptr_min_max_vals()
13284 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_ptr_min_max_vals()
13290 "R%d 32-bit pointer arithmetic prohibited\n", in adjust_ptr_min_max_vals()
13292 return -EACCES; in adjust_ptr_min_max_vals()
13295 if (ptr_reg->type & PTR_MAYBE_NULL) { in adjust_ptr_min_max_vals()
13296 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", in adjust_ptr_min_max_vals()
13297 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
13298 return -EACCES; in adjust_ptr_min_max_vals()
13301 switch (base_type(ptr_reg->type)) { in adjust_ptr_min_max_vals()
13326 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
13327 return -EACCES; in adjust_ptr_min_max_vals()
13333 dst_reg->type = ptr_reg->type; in adjust_ptr_min_max_vals()
13334 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
13336 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || in adjust_ptr_min_max_vals()
13337 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
13338 return -EINVAL; in adjust_ptr_min_max_vals()
13340 /* pointer types do not carry 32-bit bounds at the moment. */ in adjust_ptr_min_max_vals()
13355 if (known && (ptr_reg->off + smin_val == in adjust_ptr_min_max_vals()
13356 (s64)(s32)(ptr_reg->off + smin_val))) { in adjust_ptr_min_max_vals()
13358 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
13359 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
13360 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
13361 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
13362 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
13363 dst_reg->off = ptr_reg->off + smin_val; in adjust_ptr_min_max_vals()
13364 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
13367 /* A new variable offset is created. Note that off_reg->off in adjust_ptr_min_max_vals()
13376 if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) || in adjust_ptr_min_max_vals()
13377 check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) { in adjust_ptr_min_max_vals()
13378 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
13379 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
13381 if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) || in adjust_ptr_min_max_vals()
13382 check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) { in adjust_ptr_min_max_vals()
13383 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
13384 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
13386 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
13387 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
13388 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
13390 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
13392 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
13397 /* scalar -= pointer. Creates an unknown scalar */ in adjust_ptr_min_max_vals()
13400 return -EACCES; in adjust_ptr_min_max_vals()
13406 if (ptr_reg->type == PTR_TO_STACK) { in adjust_ptr_min_max_vals()
13409 return -EACCES; in adjust_ptr_min_max_vals()
13411 if (known && (ptr_reg->off - smin_val == in adjust_ptr_min_max_vals()
13412 (s64)(s32)(ptr_reg->off - smin_val))) { in adjust_ptr_min_max_vals()
13413 /* pointer -= K. Subtract it from fixed offset */ in adjust_ptr_min_max_vals()
13414 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
13415 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
13416 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
13417 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
13418 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
13419 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
13420 dst_reg->off = ptr_reg->off - smin_val; in adjust_ptr_min_max_vals()
13421 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
13425 * nonnegative, then any reg->range we had before is still good. in adjust_ptr_min_max_vals()
13427 if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) || in adjust_ptr_min_max_vals()
13428 check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) { in adjust_ptr_min_max_vals()
13430 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
13431 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
13435 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
13436 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
13439 dst_reg->umin_value = umin_ptr - umax_val; in adjust_ptr_min_max_vals()
13440 dst_reg->umax_value = umax_ptr - umin_val; in adjust_ptr_min_max_vals()
13442 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
13443 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
13444 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
13446 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
13449 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
13458 return -EACCES; in adjust_ptr_min_max_vals()
13460 /* other operators (e.g. MUL,LSH) produce non-pointer results */ in adjust_ptr_min_max_vals()
13463 return -EACCES; in adjust_ptr_min_max_vals()
13466 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
13467 return -EINVAL; in adjust_ptr_min_max_vals()
13470 return -EACCES; in adjust_ptr_min_max_vals()
13484 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_add()
13485 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_add()
13486 u32 *dst_umin = &dst_reg->u32_min_value; in scalar32_min_max_add()
13487 u32 *dst_umax = &dst_reg->u32_max_value; in scalar32_min_max_add()
13489 if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) || in scalar32_min_max_add()
13490 check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) { in scalar32_min_max_add()
13494 if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) || in scalar32_min_max_add()
13495 check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) { in scalar32_min_max_add()
13504 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_add()
13505 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_add()
13506 u64 *dst_umin = &dst_reg->umin_value; in scalar_min_max_add()
13507 u64 *dst_umax = &dst_reg->umax_value; in scalar_min_max_add()
13509 if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) || in scalar_min_max_add()
13510 check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) { in scalar_min_max_add()
13514 if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) || in scalar_min_max_add()
13515 check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) { in scalar_min_max_add()
13524 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_sub()
13525 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_sub()
13526 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_sub()
13527 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_sub()
13529 if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) || in scalar32_min_max_sub()
13530 check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) { in scalar32_min_max_sub()
13535 if (dst_reg->u32_min_value < umax_val) { in scalar32_min_max_sub()
13537 dst_reg->u32_min_value = 0; in scalar32_min_max_sub()
13538 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_sub()
13541 dst_reg->u32_min_value -= umax_val; in scalar32_min_max_sub()
13542 dst_reg->u32_max_value -= umin_val; in scalar32_min_max_sub()
13549 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_sub()
13550 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_sub()
13551 u64 umin_val = src_reg->umin_value; in scalar_min_max_sub()
13552 u64 umax_val = src_reg->umax_value; in scalar_min_max_sub()
13554 if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) || in scalar_min_max_sub()
13555 check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) { in scalar_min_max_sub()
13560 if (dst_reg->umin_value < umax_val) { in scalar_min_max_sub()
13562 dst_reg->umin_value = 0; in scalar_min_max_sub()
13563 dst_reg->umax_value = U64_MAX; in scalar_min_max_sub()
13566 dst_reg->umin_value -= umax_val; in scalar_min_max_sub()
13567 dst_reg->umax_value -= umin_val; in scalar_min_max_sub()
13574 s32 smin_val = src_reg->s32_min_value; in scalar32_min_max_mul()
13575 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_mul()
13576 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_mul()
13578 if (smin_val < 0 || dst_reg->s32_min_value < 0) { in scalar32_min_max_mul()
13586 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { in scalar32_min_max_mul()
13591 dst_reg->u32_min_value *= umin_val; in scalar32_min_max_mul()
13592 dst_reg->u32_max_value *= umax_val; in scalar32_min_max_mul()
13593 if (dst_reg->u32_max_value > S32_MAX) { in scalar32_min_max_mul()
13595 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_mul()
13596 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_mul()
13598 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_mul()
13599 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_mul()
13606 s64 smin_val = src_reg->smin_value; in scalar_min_max_mul()
13607 u64 umin_val = src_reg->umin_value; in scalar_min_max_mul()
13608 u64 umax_val = src_reg->umax_value; in scalar_min_max_mul()
13610 if (smin_val < 0 || dst_reg->smin_value < 0) { in scalar_min_max_mul()
13618 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { in scalar_min_max_mul()
13623 dst_reg->umin_value *= umin_val; in scalar_min_max_mul()
13624 dst_reg->umax_value *= umax_val; in scalar_min_max_mul()
13625 if (dst_reg->umax_value > S64_MAX) { in scalar_min_max_mul()
13627 dst_reg->smin_value = S64_MIN; in scalar_min_max_mul()
13628 dst_reg->smax_value = S64_MAX; in scalar_min_max_mul()
13630 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_mul()
13631 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_mul()
13638 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_and()
13639 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_and()
13640 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_and()
13641 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_and()
13651 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_and()
13652 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); in scalar32_min_max_and()
13657 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_and()
13658 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_and()
13659 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_and()
13661 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_and()
13662 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_and()
13669 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_and()
13670 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_and()
13671 u64 umax_val = src_reg->umax_value; in scalar_min_max_and()
13674 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_and()
13681 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_and()
13682 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); in scalar_min_max_and()
13687 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_and()
13688 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_and()
13689 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_and()
13691 dst_reg->smin_value = S64_MIN; in scalar_min_max_and()
13692 dst_reg->smax_value = S64_MAX; in scalar_min_max_and()
13701 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_or()
13702 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_or()
13703 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_or()
13704 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_or()
13714 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); in scalar32_min_max_or()
13715 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_or()
13720 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_or()
13721 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_or()
13722 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_or()
13724 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_or()
13725 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_or()
13732 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_or()
13733 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_or()
13734 u64 umin_val = src_reg->umin_value; in scalar_min_max_or()
13737 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_or()
13744 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); in scalar_min_max_or()
13745 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_or()
13750 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_or()
13751 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_or()
13752 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_or()
13754 dst_reg->smin_value = S64_MIN; in scalar_min_max_or()
13755 dst_reg->smax_value = S64_MAX; in scalar_min_max_or()
13764 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_xor()
13765 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_xor()
13766 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_xor()
13774 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_xor()
13775 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_xor()
13780 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_xor()
13781 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_xor()
13782 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_xor()
13784 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_xor()
13785 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_xor()
13792 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_xor()
13793 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_xor()
13796 /* dst_reg->var_off.value has been updated earlier */ in scalar_min_max_xor()
13797 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_xor()
13802 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_xor()
13803 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_xor()
13808 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_xor()
13809 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_xor()
13810 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_xor()
13812 dst_reg->smin_value = S64_MIN; in scalar_min_max_xor()
13813 dst_reg->smax_value = S64_MAX; in scalar_min_max_xor()
13825 dst_reg->s32_min_value = S32_MIN; in __scalar32_min_max_lsh()
13826 dst_reg->s32_max_value = S32_MAX; in __scalar32_min_max_lsh()
13828 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { in __scalar32_min_max_lsh()
13829 dst_reg->u32_min_value = 0; in __scalar32_min_max_lsh()
13830 dst_reg->u32_max_value = U32_MAX; in __scalar32_min_max_lsh()
13832 dst_reg->u32_min_value <<= umin_val; in __scalar32_min_max_lsh()
13833 dst_reg->u32_max_value <<= umax_val; in __scalar32_min_max_lsh()
13840 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_lsh()
13841 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_lsh()
13843 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_lsh()
13846 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); in scalar32_min_max_lsh()
13865 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) in __scalar64_min_max_lsh()
13866 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; in __scalar64_min_max_lsh()
13868 dst_reg->smax_value = S64_MAX; in __scalar64_min_max_lsh()
13870 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) in __scalar64_min_max_lsh()
13871 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; in __scalar64_min_max_lsh()
13873 dst_reg->smin_value = S64_MIN; in __scalar64_min_max_lsh()
13876 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { in __scalar64_min_max_lsh()
13877 dst_reg->umin_value = 0; in __scalar64_min_max_lsh()
13878 dst_reg->umax_value = U64_MAX; in __scalar64_min_max_lsh()
13880 dst_reg->umin_value <<= umin_val; in __scalar64_min_max_lsh()
13881 dst_reg->umax_value <<= umax_val; in __scalar64_min_max_lsh()
13888 u64 umax_val = src_reg->umax_value; in scalar_min_max_lsh()
13889 u64 umin_val = src_reg->umin_value; in scalar_min_max_lsh()
13895 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); in scalar_min_max_lsh()
13903 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_rsh()
13904 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_rsh()
13905 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_rsh()
13921 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_rsh()
13922 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_rsh()
13924 dst_reg->var_off = tnum_rshift(subreg, umin_val); in scalar32_min_max_rsh()
13925 dst_reg->u32_min_value >>= umax_val; in scalar32_min_max_rsh()
13926 dst_reg->u32_max_value >>= umin_val; in scalar32_min_max_rsh()
13935 u64 umax_val = src_reg->umax_value; in scalar_min_max_rsh()
13936 u64 umin_val = src_reg->umin_value; in scalar_min_max_rsh()
13952 dst_reg->smin_value = S64_MIN; in scalar_min_max_rsh()
13953 dst_reg->smax_value = S64_MAX; in scalar_min_max_rsh()
13954 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); in scalar_min_max_rsh()
13955 dst_reg->umin_value >>= umax_val; in scalar_min_max_rsh()
13956 dst_reg->umax_value >>= umin_val; in scalar_min_max_rsh()
13969 u64 umin_val = src_reg->u32_min_value; in scalar32_min_max_arsh()
13974 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); in scalar32_min_max_arsh()
13975 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); in scalar32_min_max_arsh()
13977 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); in scalar32_min_max_arsh()
13982 dst_reg->u32_min_value = 0; in scalar32_min_max_arsh()
13983 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_arsh()
13992 u64 umin_val = src_reg->umin_value; in scalar_min_max_arsh()
13997 dst_reg->smin_value >>= umin_val; in scalar_min_max_arsh()
13998 dst_reg->smax_value >>= umin_val; in scalar_min_max_arsh()
14000 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); in scalar_min_max_arsh()
14005 dst_reg->umin_value = 0; in scalar_min_max_arsh()
14006 dst_reg->umax_value = U64_MAX; in scalar_min_max_arsh()
14009 * on bits being shifted in from upper 32-bits. Take easy way out in scalar_min_max_arsh()
14020 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; in is_safe_to_compute_dst_reg_range()
14023 if (tnum_subreg_is_const(src_reg->var_off) in is_safe_to_compute_dst_reg_range()
14024 && src_reg->s32_min_value == src_reg->s32_max_value in is_safe_to_compute_dst_reg_range()
14025 && src_reg->u32_min_value == src_reg->u32_max_value) in is_safe_to_compute_dst_reg_range()
14028 if (tnum_is_const(src_reg->var_off) in is_safe_to_compute_dst_reg_range()
14029 && src_reg->smin_value == src_reg->smax_value in is_safe_to_compute_dst_reg_range()
14030 && src_reg->umin_value == src_reg->umax_value) in is_safe_to_compute_dst_reg_range()
14034 switch (BPF_OP(insn->code)) { in is_safe_to_compute_dst_reg_range()
14050 return (src_is_const && src_reg->umax_value < insn_bitness); in is_safe_to_compute_dst_reg_range()
14056 /* WARNING: This function does calculations on 64-bit values, but the actual
14057 * execution may occur on 32-bit values. Therefore, things like bitshifts
14058 * need extra checks in the 32-bit case.
14065 u8 opcode = BPF_OP(insn->code); in adjust_scalar_min_max_vals()
14066 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_scalar_min_max_vals()
14086 * understand and calculate behavior in both 32-bit and 64-bit alu ops. in adjust_scalar_min_max_vals()
14098 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14103 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14106 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14111 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14116 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14121 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
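/* Editor's illustration, not kernel code: why the WARNING above calls out bitshifts.
 * The same shift applied to the full 64-bit value and to the low 32-bit subregister
 * diverges once the upper bits are discarded, which is why the verifier keeps the
 * separate scalar32_*() helpers for the u32/s32 bounds alongside the 64-bit ones.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x1ffffffffULL;			/* bit 32 set */
	uint64_t shifted64 = val << 4;			/* BPF_ALU64 view: 0x1ffffffff0 */
	uint32_t shifted32 = (uint32_t)val << 4;	/* BPF_ALU (32-bit) view: 0xfffffff0 */

	/* tracking only the 64-bit bounds would mispredict the 32-bit outcome */
	printf("%llx vs %x\n", (unsigned long long)shifted64, (unsigned)shifted32);
	return 0;
}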
14160 struct bpf_verifier_state *vstate = env->cur_state; in adjust_reg_min_max_vals()
14161 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_reg_min_max_vals()
14162 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals()
14164 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_reg_min_max_vals()
14165 u8 opcode = BPF_OP(insn->code); in adjust_reg_min_max_vals()
14168 dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
14171 if (dst_reg->type == PTR_TO_ARENA) { in adjust_reg_min_max_vals()
14174 if (BPF_CLASS(insn->code) == BPF_ALU64) in adjust_reg_min_max_vals()
14176 * 32-bit operations zero upper bits automatically. in adjust_reg_min_max_vals()
14177 * 64-bit operations need to be converted to 32. in adjust_reg_min_max_vals()
14179 aux->needs_zext = true; in adjust_reg_min_max_vals()
14185 if (dst_reg->type != SCALAR_VALUE) in adjust_reg_min_max_vals()
14188 if (BPF_SRC(insn->code) == BPF_X) { in adjust_reg_min_max_vals()
14189 src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
14190 if (src_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
14191 if (dst_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
14196 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_reg_min_max_vals()
14197 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
14201 insn->dst_reg, in adjust_reg_min_max_vals()
14203 return -EACCES; in adjust_reg_min_max_vals()
14209 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
14217 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
14222 } else if (dst_reg->precise) { in adjust_reg_min_max_vals()
14224 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
14233 __mark_reg_known(&off_reg, insn->imm); in adjust_reg_min_max_vals()
14244 return -EINVAL; in adjust_reg_min_max_vals()
14249 return -EINVAL; in adjust_reg_min_max_vals()
14260 * So for 64-bit alu remember constant delta between r2 and r1 and in adjust_reg_min_max_vals()
14263 if (env->bpf_capable && in adjust_reg_min_max_vals()
14264 BPF_OP(insn->code) == BPF_ADD && !alu32 && in adjust_reg_min_max_vals()
14265 dst_reg->id && is_reg_const(src_reg, false)) { in adjust_reg_min_max_vals()
14268 if ((dst_reg->id & BPF_ADD_CONST) || in adjust_reg_min_max_vals()
14273 * we cannot accumulate another val into rx->off. in adjust_reg_min_max_vals()
14275 dst_reg->off = 0; in adjust_reg_min_max_vals()
14276 dst_reg->id = 0; in adjust_reg_min_max_vals()
14278 dst_reg->id |= BPF_ADD_CONST; in adjust_reg_min_max_vals()
14279 dst_reg->off = val; in adjust_reg_min_max_vals()
14286 dst_reg->id = 0; in adjust_reg_min_max_vals()
14291 /* check validity of 32-bit and 64-bit arithmetic operations */
14295 u8 opcode = BPF_OP(insn->code); in check_alu_op()
14300 if (BPF_SRC(insn->code) != BPF_K || in check_alu_op()
14301 insn->src_reg != BPF_REG_0 || in check_alu_op()
14302 insn->off != 0 || insn->imm != 0) { in check_alu_op()
14304 return -EINVAL; in check_alu_op()
14307 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || in check_alu_op()
14308 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || in check_alu_op()
14309 (BPF_CLASS(insn->code) == BPF_ALU64 && in check_alu_op()
14310 BPF_SRC(insn->code) != BPF_TO_LE)) { in check_alu_op()
14312 return -EINVAL; in check_alu_op()
14317 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
14321 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
14323 insn->dst_reg); in check_alu_op()
14324 return -EACCES; in check_alu_op()
14328 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
14334 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
14335 if (BPF_CLASS(insn->code) == BPF_ALU) { in check_alu_op()
14336 if ((insn->off != 0 && insn->off != 8 && insn->off != 16) || in check_alu_op()
14337 insn->imm) { in check_alu_op()
14339 return -EINVAL; in check_alu_op()
14341 } else if (insn->off == BPF_ADDR_SPACE_CAST) { in check_alu_op()
14342 if (insn->imm != 1 && insn->imm != 1u << 16) { in check_alu_op()
14344 return -EINVAL; in check_alu_op()
14346 if (!env->prog->aux->arena) { in check_alu_op()
14348 return -EINVAL; in check_alu_op()
14351 if ((insn->off != 0 && insn->off != 8 && insn->off != 16 && in check_alu_op()
14352 insn->off != 32) || insn->imm) { in check_alu_op()
14354 return -EINVAL; in check_alu_op()
14359 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
14363 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
14365 return -EINVAL; in check_alu_op()
14370 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
14374 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
14375 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
14376 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
14378 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
14379 if (insn->imm) { in check_alu_op()
14381 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
14382 if (insn->imm == 1) { /* cast from as(1) to as(0) */ in check_alu_op()
14383 dst_reg->type = PTR_TO_ARENA; in check_alu_op()
14384 /* PTR_TO_ARENA is 32-bit */ in check_alu_op()
14385 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
14387 } else if (insn->off == 0) { in check_alu_op()
14393 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
14394 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
14397 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
14399 "R%d sign-extension part of pointer\n", in check_alu_op()
14400 insn->src_reg); in check_alu_op()
14401 return -EACCES; in check_alu_op()
14402 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
14405 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
14410 dst_reg->id = 0; in check_alu_op()
14411 coerce_reg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
14412 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
14413 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
14415 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
14420 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
14423 insn->src_reg); in check_alu_op()
14424 return -EACCES; in check_alu_op()
14425 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
14426 if (insn->off == 0) { in check_alu_op()
14437 dst_reg->id = 0; in check_alu_op()
14438 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
14439 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
14442 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
14448 dst_reg->id = 0; in check_alu_op()
14449 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
14450 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
14451 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
14455 insn->dst_reg); in check_alu_op()
14465 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
14466 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
14467 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
14468 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
14469 insn->imm); in check_alu_op()
14471 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
14472 (u32)insn->imm); in check_alu_op()
14478 return -EINVAL; in check_alu_op()
14482 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
14483 if (insn->imm != 0 || insn->off > 1 || in check_alu_op()
14484 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
14486 return -EINVAL; in check_alu_op()
14489 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
14493 if (insn->src_reg != BPF_REG_0 || insn->off > 1 || in check_alu_op()
14494 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
14496 return -EINVAL; in check_alu_op()
14501 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
14506 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { in check_alu_op()
14508 return -EINVAL; in check_alu_op()
14512 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { in check_alu_op()
14513 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; in check_alu_op()
14515 if (insn->imm < 0 || insn->imm >= size) { in check_alu_op()
14516 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
14517 return -EINVAL; in check_alu_op()
14522 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
14528 return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu"); in check_alu_op()
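/* Illustrative note (not from the original file): a BPF_MOV with a
 * non-zero insn->off is the sign-extending move (movsx), e.g. in BPF
 * assembler syntax:
 *
 *   r1 = (s8)r2       // BPF_ALU64 | BPF_MOV | BPF_X, off == 8
 *   w1 = (s16)w2      // BPF_ALU   | BPF_MOV | BPF_X, off == 16
 *
 * The 'no_sext' checks above keep the source bounds when the value
 * provably fits the narrower signed type (top bit clear); otherwise
 * the destination is marked as an unknown scalar.
 */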
14540 if (dst_reg->off < 0 || in find_good_pkt_pointers()
14541 (dst_reg->off == 0 && range_right_open)) in find_good_pkt_pointers()
14545 if (dst_reg->umax_value > MAX_PACKET_OFF || in find_good_pkt_pointers()
14546 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) in find_good_pkt_pointers()
14552 new_range = dst_reg->off; in find_good_pkt_pointers()
14593 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) in find_good_pkt_pointers()
14594 * and [r3, r3 + 8-1) respectively is safe to access depending on in find_good_pkt_pointers()
14601 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. in find_good_pkt_pointers()
14604 if (reg->type == type && reg->id == dst_reg->id) in find_good_pkt_pointers()
14606 reg->range = max(reg->range, new_range); in find_good_pkt_pointers()
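/* Illustrative example (not from the original file): the range update
 * above is what makes the canonical XDP/TC bounds-check pattern verify:
 *
 *   void *data     = (void *)(long)ctx->data;
 *   void *data_end = (void *)(long)ctx->data_end;
 *   struct ethhdr *eth = data;
 *
 *   if (data + sizeof(*eth) > data_end)
 *           return XDP_DROP;
 *   // fall-through branch: find_good_pkt_pointers() records
 *   // range = sizeof(*eth) for 'data', so eth->h_proto is readable.
 */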
14616 struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off; in is_scalar_branch_taken()
14617 struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off; in is_scalar_branch_taken()
14618 u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value; in is_scalar_branch_taken()
14619 u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value; in is_scalar_branch_taken()
14620 s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value; in is_scalar_branch_taken()
14621 s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value; in is_scalar_branch_taken()
14622 u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value; in is_scalar_branch_taken()
14623 u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value; in is_scalar_branch_taken()
14624 s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value; in is_scalar_branch_taken()
14625 s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value; in is_scalar_branch_taken()
14634 /* non-overlapping ranges */ in is_scalar_branch_taken()
14640 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
14641 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
14644 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
14645 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
14647 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
14648 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
14658 /* non-overlapping ranges */ in is_scalar_branch_taken()
14664 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
14665 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
14668 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
14669 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
14671 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
14672 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
14682 return -1; in is_scalar_branch_taken()
14738 return -1; in is_scalar_branch_taken()
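/* Worked example (illustrative, not from the original file): for a
 * BPF_JGT (unsigned >) comparison with reg1 in [10, 20] and reg2 in
 * [0, 5], umin1 > umax2, so every value of reg1 exceeds every value of
 * reg2 and the branch is always taken -> return 1.  With reg1 in
 * [0, 5] and reg2 in [10, 20] the branch can never be taken ->
 * return 0.  Overlapping ranges are inconclusive -> return -1.
 */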
14768 if (src_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
14770 } else if (dst_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
14774 return -1; in is_pkt_ptr_branch_taken()
14777 if (pkt->range >= 0) in is_pkt_ptr_branch_taken()
14778 return -1; in is_pkt_ptr_branch_taken()
14786 if (pkt->range == BEYOND_PKT_END) in is_pkt_ptr_branch_taken()
14795 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) in is_pkt_ptr_branch_taken()
14799 return -1; in is_pkt_ptr_branch_taken()
14804 * 1 - branch will be taken and "goto target" will be executed
14805 * 0 - branch will not be taken and fall-through to next insn
14806 * -1 - unknown. Example: "if (reg1 < 5)" is unknown when register value
14825 return -1; in is_branch_taken()
14828 return -1; in is_branch_taken()
14835 return -1; in is_branch_taken()
14843 return -1; in is_branch_taken()
14899 reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
14900 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
14901 reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
14902 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
14903 reg2->u32_min_value = reg1->u32_min_value; in regs_refine_cond_op()
14904 reg2->u32_max_value = reg1->u32_max_value; in regs_refine_cond_op()
14905 reg2->s32_min_value = reg1->s32_min_value; in regs_refine_cond_op()
14906 reg2->s32_max_value = reg1->s32_max_value; in regs_refine_cond_op()
14908 t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off)); in regs_refine_cond_op()
14909 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
14910 reg2->var_off = tnum_with_subreg(reg2->var_off, t); in regs_refine_cond_op()
14912 reg1->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
14913 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
14914 reg1->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
14915 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
14916 reg2->umin_value = reg1->umin_value; in regs_refine_cond_op()
14917 reg2->umax_value = reg1->umax_value; in regs_refine_cond_op()
14918 reg2->smin_value = reg1->smin_value; in regs_refine_cond_op()
14919 reg2->smax_value = reg1->smax_value; in regs_refine_cond_op()
14921 reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off); in regs_refine_cond_op()
14922 reg2->var_off = reg1->var_off; in regs_refine_cond_op()
14945 if (reg1->u32_min_value == (u32)val) in regs_refine_cond_op()
14946 reg1->u32_min_value++; in regs_refine_cond_op()
14947 if (reg1->u32_max_value == (u32)val) in regs_refine_cond_op()
14948 reg1->u32_max_value--; in regs_refine_cond_op()
14949 if (reg1->s32_min_value == (s32)val) in regs_refine_cond_op()
14950 reg1->s32_min_value++; in regs_refine_cond_op()
14951 if (reg1->s32_max_value == (s32)val) in regs_refine_cond_op()
14952 reg1->s32_max_value--; in regs_refine_cond_op()
14954 if (reg1->umin_value == (u64)val) in regs_refine_cond_op()
14955 reg1->umin_value++; in regs_refine_cond_op()
14956 if (reg1->umax_value == (u64)val) in regs_refine_cond_op()
14957 reg1->umax_value--; in regs_refine_cond_op()
14958 if (reg1->smin_value == (s64)val) in regs_refine_cond_op()
14959 reg1->smin_value++; in regs_refine_cond_op()
14960 if (reg1->smax_value == (s64)val) in regs_refine_cond_op()
14961 reg1->smax_value--; in regs_refine_cond_op()
14974 * it's a single-bit value to begin with. in regs_refine_cond_op()
14983 t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val)); in regs_refine_cond_op()
14984 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
14986 reg1->var_off = tnum_or(reg1->var_off, tnum_const(val)); in regs_refine_cond_op()
14996 t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val)); in regs_refine_cond_op()
14997 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
14999 reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val)); in regs_refine_cond_op()
15004 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
15005 reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
15007 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
15008 reg2->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
15013 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1); in regs_refine_cond_op()
15014 reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value); in regs_refine_cond_op()
15016 reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1); in regs_refine_cond_op()
15017 reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value); in regs_refine_cond_op()
15022 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
15023 reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
15025 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
15026 reg2->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
15031 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1); in regs_refine_cond_op()
15032 reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value); in regs_refine_cond_op()
15034 reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1); in regs_refine_cond_op()
15035 reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value); in regs_refine_cond_op()
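/* Worked example (illustrative, not from the original file): for
 * "if r1 < r2" (unsigned BPF_JLT) with r1 in [0, 100] and r2 in
 * [10, 50]:
 *   - taken branch (r1 < r2): r1 is narrowed to [0, 49] because
 *     umax1 = min(umax1, umax2 - 1); r2 stays [10, 50] since
 *     max(umin1 + 1, umin2) is still 10.
 *   - fall-through branch (r1 >= r2): r1 is narrowed to [10, 100],
 *     r2 stays [10, 50].
 * reg_set_min_max() below applies this refinement to both the taken
 * and the fall-through copies of the registers.
 */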
15045 * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
15062 if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE) in reg_set_min_max()
15086 if (type_may_be_null(reg->type) && reg->id == id && in mark_ptr_or_null_reg()
15087 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { in mark_ptr_or_null_reg()
15089 * known-zero, because we don't allow pointer arithmetic on in mark_ptr_or_null_reg()
15095 * is fine to expect to see reg->off. in mark_ptr_or_null_reg()
15097 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) in mark_ptr_or_null_reg()
15099 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && in mark_ptr_or_null_reg()
15100 WARN_ON_ONCE(reg->off)) in mark_ptr_or_null_reg()
15104 reg->type = SCALAR_VALUE; in mark_ptr_or_null_reg()
15109 reg->id = 0; in mark_ptr_or_null_reg()
15110 reg->ref_obj_id = 0; in mark_ptr_or_null_reg()
15118 /* For not-NULL ptr, reg->ref_obj_id will be reset in mark_ptr_or_null_reg()
15121 * reg->id is still used by spin_lock ptr. Other in mark_ptr_or_null_reg()
15122 * than spin_lock ptr type, reg->id can be reset. in mark_ptr_or_null_reg()
15124 reg->id = 0; in mark_ptr_or_null_reg()
15135 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_ptr_or_null_regs()
15136 struct bpf_reg_state *regs = state->regs, *reg; in mark_ptr_or_null_regs()
15158 if (BPF_SRC(insn->code) != BPF_X) in try_match_pkt_pointers()
15161 /* Pointers are always 64-bit. */ in try_match_pkt_pointers()
15162 if (BPF_CLASS(insn->code) == BPF_JMP32) in try_match_pkt_pointers()
15165 switch (BPF_OP(insn->code)) { in try_match_pkt_pointers()
15167 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15168 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15169 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15173 dst_reg->type, false); in try_match_pkt_pointers()
15174 mark_pkt_end(other_branch, insn->dst_reg, true); in try_match_pkt_pointers()
15175 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15176 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15178 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15181 src_reg->type, true); in try_match_pkt_pointers()
15182 mark_pkt_end(this_branch, insn->src_reg, false); in try_match_pkt_pointers()
15188 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15189 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15190 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15194 dst_reg->type, true); in try_match_pkt_pointers()
15195 mark_pkt_end(this_branch, insn->dst_reg, false); in try_match_pkt_pointers()
15196 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15197 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15199 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15202 src_reg->type, false); in try_match_pkt_pointers()
15203 mark_pkt_end(other_branch, insn->src_reg, true); in try_match_pkt_pointers()
15209 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15210 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15211 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15215 dst_reg->type, true); in try_match_pkt_pointers()
15216 mark_pkt_end(other_branch, insn->dst_reg, false); in try_match_pkt_pointers()
15217 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15218 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15220 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15223 src_reg->type, false); in try_match_pkt_pointers()
15224 mark_pkt_end(this_branch, insn->src_reg, true); in try_match_pkt_pointers()
15230 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15231 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15232 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15236 dst_reg->type, false); in try_match_pkt_pointers()
15237 mark_pkt_end(this_branch, insn->dst_reg, true); in try_match_pkt_pointers()
15238 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15239 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15241 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15244 src_reg->type, true); in try_match_pkt_pointers()
15245 mark_pkt_end(other_branch, insn->src_reg, false); in try_match_pkt_pointers()
15262 if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id) in __collect_linked_regs()
15267 e->frameno = frameno; in __collect_linked_regs()
15268 e->is_reg = is_reg; in __collect_linked_regs()
15269 e->regno = spi_or_reg; in __collect_linked_regs()
15271 reg->id = 0; in __collect_linked_regs()
15276 * in verifier state, save R in linked_regs if R->id == id.
15287 for (i = vstate->curframe; i >= 0; i--) { in collect_linked_regs()
15288 func = vstate->frame[i]; in collect_linked_regs()
15290 reg = &func->regs[j]; in collect_linked_regs()
15293 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in collect_linked_regs()
15294 if (!is_spilled_reg(&func->stack[j])) in collect_linked_regs()
15296 reg = &func->stack[j].spilled_ptr; in collect_linked_regs()
15303 * if R->id == known_reg->id.
15313 for (i = 0; i < linked_regs->cnt; ++i) { in sync_linked_regs()
15314 e = &linked_regs->entries[i]; in sync_linked_regs()
15315 reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno] in sync_linked_regs()
15316 : &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr; in sync_linked_regs()
15317 if (reg->type != SCALAR_VALUE || reg == known_reg) in sync_linked_regs()
15319 if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) in sync_linked_regs()
15321 if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || in sync_linked_regs()
15322 reg->off == known_reg->off) { in sync_linked_regs()
15323 s32 saved_subreg_def = reg->subreg_def; in sync_linked_regs()
15326 reg->subreg_def = saved_subreg_def; in sync_linked_regs()
15328 s32 saved_subreg_def = reg->subreg_def; in sync_linked_regs()
15329 s32 saved_off = reg->off; in sync_linked_regs()
15332 __mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off); in sync_linked_regs()
15340 reg->off = saved_off; in sync_linked_regs()
15341 reg->subreg_def = saved_subreg_def; in sync_linked_regs()
15345 reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); in sync_linked_regs()
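/* Illustrative sketch (not from the original file, assuming the
 * BPF_ADD_CONST linking used above): after a sequence like
 *
 *   r6 = r7;        // r6 and r7 share the same id
 *   r6 += 8;        // id kept with BPF_ADD_CONST, delta in reg->off
 *   if r7 > 100 goto out;
 *
 * the fall-through branch learns r7 <= 100, and sync_linked_regs()
 * propagates that bound to r6 as well, shifted by the recorded +8
 * delta via the scalar/tnum add of 'fake_reg' above.
 */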
15353 struct bpf_verifier_state *this_branch = env->cur_state; in check_cond_jmp_op()
15355 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; in check_cond_jmp_op()
15359 u8 opcode = BPF_OP(insn->code); in check_cond_jmp_op()
15361 int pred = -1; in check_cond_jmp_op()
15367 return -EINVAL; in check_cond_jmp_op()
15371 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in check_cond_jmp_op()
15374 if (insn->code != (BPF_JMP | BPF_JCOND) || in check_cond_jmp_op()
15375 insn->src_reg != BPF_MAY_GOTO || in check_cond_jmp_op()
15376 insn->dst_reg || insn->imm || insn->off == 0) { in check_cond_jmp_op()
15378 insn->off, insn->imm); in check_cond_jmp_op()
15379 return -EINVAL; in check_cond_jmp_op()
15381 prev_st = find_prev_entry(env, cur_st->parent, idx); in check_cond_jmp_op()
15386 return -ENOMEM; in check_cond_jmp_op()
15388 queued_st->may_goto_depth++; in check_cond_jmp_op()
15391 *insn_idx += insn->off; in check_cond_jmp_op()
15396 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
15400 dst_reg = &regs[insn->dst_reg]; in check_cond_jmp_op()
15401 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
15402 if (insn->imm != 0) { in check_cond_jmp_op()
15404 return -EINVAL; in check_cond_jmp_op()
15408 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
15412 src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
15414 is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
15416 insn->src_reg); in check_cond_jmp_op()
15417 return -EACCES; in check_cond_jmp_op()
15420 if (insn->src_reg != BPF_REG_0) { in check_cond_jmp_op()
15422 return -EINVAL; in check_cond_jmp_op()
15424 src_reg = &env->fake_reg[0]; in check_cond_jmp_op()
15426 src_reg->type = SCALAR_VALUE; in check_cond_jmp_op()
15427 __mark_reg_known(src_reg, insn->imm); in check_cond_jmp_op()
15430 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; in check_cond_jmp_op()
15437 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
15438 if (BPF_SRC(insn->code) == BPF_X && !err && in check_cond_jmp_op()
15440 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
15446 /* Only follow the goto, ignore fall-through. If needed, push in check_cond_jmp_op()
15447 * the fall-through branch for simulation under speculative in check_cond_jmp_op()
15450 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
15453 return -EFAULT; in check_cond_jmp_op()
15454 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
15455 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
15456 *insn_idx += insn->off; in check_cond_jmp_op()
15459 /* Only follow the fall-through branch, since that's where the in check_cond_jmp_op()
15463 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
15465 *insn_idx + insn->off + 1, in check_cond_jmp_op()
15467 return -EFAULT; in check_cond_jmp_op()
15468 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
15469 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
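/* Illustrative example (not from the original file): if r1 is known to
 * be exactly 5, then for "if r1 == 5 goto +3" is_branch_taken()
 * returns 1 (pred == 1) and only the jump target is explored, while
 * for "if r1 == 7 goto +3" it returns 0 (pred == 0) and only the
 * fall-through is explored; the other branch is dead in either case,
 * modulo the Spectre v1 speculative-path handling above.
 */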
15478 if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id) in check_cond_jmp_op()
15479 collect_linked_regs(this_branch, src_reg->id, &linked_regs); in check_cond_jmp_op()
15480 if (dst_reg->type == SCALAR_VALUE && dst_reg->id) in check_cond_jmp_op()
15481 collect_linked_regs(this_branch, dst_reg->id, &linked_regs); in check_cond_jmp_op()
15488 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
15491 return -EFAULT; in check_cond_jmp_op()
15492 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; in check_cond_jmp_op()
15494 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
15496 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
15497 &other_branch_regs[insn->src_reg], in check_cond_jmp_op()
15499 } else /* BPF_SRC(insn->code) == BPF_K */ { in check_cond_jmp_op()
15504 memcpy(&env->fake_reg[1], &env->fake_reg[0], in check_cond_jmp_op()
15505 sizeof(env->fake_reg[0])); in check_cond_jmp_op()
15507 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
15508 &env->fake_reg[0], in check_cond_jmp_op()
15509 dst_reg, &env->fake_reg[1], in check_cond_jmp_op()
15515 if (BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
15516 src_reg->type == SCALAR_VALUE && src_reg->id && in check_cond_jmp_op()
15517 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { in check_cond_jmp_op()
15519 sync_linked_regs(other_branch, &other_branch_regs[insn->src_reg], &linked_regs); in check_cond_jmp_op()
15521 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && in check_cond_jmp_op()
15522 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { in check_cond_jmp_op()
15524 sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs); in check_cond_jmp_op()
15529 * E.g. register A - maybe null in check_cond_jmp_op()
15530 * register B - not null in check_cond_jmp_op()
15531 * for JNE A, B, ... - A is not null in the false branch; in check_cond_jmp_op()
15532 * for JEQ A, B, ... - A is not null in the true branch. in check_cond_jmp_op()
15539 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
15541 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) && in check_cond_jmp_op()
15542 base_type(src_reg->type) != PTR_TO_BTF_ID && in check_cond_jmp_op()
15543 base_type(dst_reg->type) != PTR_TO_BTF_ID) { in check_cond_jmp_op()
15557 if (type_may_be_null(src_reg->type)) in check_cond_jmp_op()
15558 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]); in check_cond_jmp_op()
15560 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]); in check_cond_jmp_op()
15568 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && in check_cond_jmp_op()
15569 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && in check_cond_jmp_op()
15570 type_may_be_null(dst_reg->type)) { in check_cond_jmp_op()
15574 mark_ptr_or_null_regs(this_branch, insn->dst_reg, in check_cond_jmp_op()
15576 mark_ptr_or_null_regs(other_branch, insn->dst_reg, in check_cond_jmp_op()
15578 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], in check_cond_jmp_op()
15580 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
15582 insn->dst_reg); in check_cond_jmp_op()
15583 return -EACCES; in check_cond_jmp_op()
15585 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
15586 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
15599 if (BPF_SIZE(insn->code) != BPF_DW) { in check_ld_imm()
15601 return -EINVAL; in check_ld_imm()
15603 if (insn->off != 0) { in check_ld_imm()
15605 return -EINVAL; in check_ld_imm()
15608 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
15612 dst_reg = &regs[insn->dst_reg]; in check_ld_imm()
15613 if (insn->src_reg == 0) { in check_ld_imm()
15614 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; in check_ld_imm()
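		/* Illustrative note (not from the original file): ld_imm64
		 * (BPF_LD | BPF_DW | BPF_IMM) is the only 16-byte instruction;
		 * e.g. "r1 = 0x1122334455667788 ll" occupies two struct bpf_insn
		 * slots, imm = 0x55667788 in the first and imm = 0x11223344 in
		 * the second, reassembled into 'imm' above.
		 */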
15616 dst_reg->type = SCALAR_VALUE; in check_ld_imm()
15617 __mark_reg_known(&regs[insn->dst_reg], imm); in check_ld_imm()
15622 * we either succeed and assign a corresponding dst_reg->type after in check_ld_imm()
15625 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
15627 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { in check_ld_imm()
15628 dst_reg->type = aux->btf_var.reg_type; in check_ld_imm()
15629 switch (base_type(dst_reg->type)) { in check_ld_imm()
15631 dst_reg->mem_size = aux->btf_var.mem_size; in check_ld_imm()
15634 dst_reg->btf = aux->btf_var.btf; in check_ld_imm()
15635 dst_reg->btf_id = aux->btf_var.btf_id; in check_ld_imm()
15639 return -EFAULT; in check_ld_imm()
15644 if (insn->src_reg == BPF_PSEUDO_FUNC) { in check_ld_imm()
15645 struct bpf_prog_aux *aux = env->prog->aux; in check_ld_imm()
15647 env->insn_idx + insn->imm + 1); in check_ld_imm()
15649 if (!aux->func_info) { in check_ld_imm()
15651 return -EINVAL; in check_ld_imm()
15653 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { in check_ld_imm()
15655 return -EINVAL; in check_ld_imm()
15658 dst_reg->type = PTR_TO_FUNC; in check_ld_imm()
15659 dst_reg->subprogno = subprogno; in check_ld_imm()
15663 map = env->used_maps[aux->map_index]; in check_ld_imm()
15664 dst_reg->map_ptr = map; in check_ld_imm()
15666 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || in check_ld_imm()
15667 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { in check_ld_imm()
15668 if (map->map_type == BPF_MAP_TYPE_ARENA) { in check_ld_imm()
15672 dst_reg->type = PTR_TO_MAP_VALUE; in check_ld_imm()
15673 dst_reg->off = aux->map_off; in check_ld_imm()
15674 WARN_ON_ONCE(map->max_entries != 1); in check_ld_imm()
15675 /* We want reg->id to be same (0) as map_value is not distinct */ in check_ld_imm()
15676 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || in check_ld_imm()
15677 insn->src_reg == BPF_PSEUDO_MAP_IDX) { in check_ld_imm()
15678 dst_reg->type = CONST_PTR_TO_MAP; in check_ld_imm()
15681 return -EINVAL; in check_ld_imm()
15700 * - they can only appear in the programs where ctx == skb
15701 * - since they are wrappers of function calls, they scratch R1-R5 registers,
15702 * preserve R6-R9, and store return value into R0
15709 * IMM == 32-bit immediate
15712 * R0 - 8/16/32-bit skb data converted to cpu endianness
15718 u8 mode = BPF_MODE(insn->code); in check_ld_abs()
15721 if (!may_access_skb(resolve_prog_type(env->prog))) { in check_ld_abs()
15723 return -EINVAL; in check_ld_abs()
15726 if (!env->ops->gen_ld_abs) { in check_ld_abs()
15728 return -EINVAL; in check_ld_abs()
15731 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || in check_ld_abs()
15732 BPF_SIZE(insn->code) == BPF_DW || in check_ld_abs()
15733 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { in check_ld_abs()
15735 return -EINVAL; in check_ld_abs()
15753 if (env->cur_state->active_lock.ptr) { in check_ld_abs()
15754 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); in check_ld_abs()
15755 return -EINVAL; in check_ld_abs()
15758 if (env->cur_state->active_rcu_lock) { in check_ld_abs()
15759 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n"); in check_ld_abs()
15760 return -EINVAL; in check_ld_abs()
15763 if (env->cur_state->active_preempt_lock) { in check_ld_abs()
15764 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_preempt_disable-ed region\n"); in check_ld_abs()
15765 return -EINVAL; in check_ld_abs()
15771 return -EINVAL; in check_ld_abs()
15776 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
15796 /* ld_abs load up to 32-bit skb data. */ in check_ld_abs()
15797 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
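/* Illustrative example (not from the original file): a classic LD_ABS
 * use in an skb-based program, loading the EtherType at offset 12:
 *
 *   BPF_LD_ABS(BPF_H, 12)   // R0 = 16-bit load from skb data, cpu-endian
 *
 * R6 must already hold the skb context; R0 receives the result and
 * R1-R5 are scratched, as the comment above describes.
 */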
15805 const struct bpf_prog *prog = env->prog; in check_return_code()
15808 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_return_code()
15810 struct bpf_func_state *frame = env->cur_state->frame[0]; in check_return_code()
15811 const bool is_subprog = frame->subprogno; in check_return_code()
15814 /* LSM and struct_ops func-ptr's return type could be "void" */ in check_return_code()
15815 if (!is_subprog || frame->in_exception_callback_fn) { in check_return_code()
15818 if (prog->expected_attach_type == BPF_LSM_CGROUP) in check_return_code()
15819 /* See below, can be 0 or 0-1 depending on hook. */ in check_return_code()
15823 if (!prog->aux->attach_func_proto->type) in check_return_code()
15843 return -EACCES; in check_return_code()
15848 if (frame->in_async_callback_fn) { in check_return_code()
15855 if (is_subprog && !frame->in_exception_callback_fn) { in check_return_code()
15856 if (reg->type != SCALAR_VALUE) { in check_return_code()
15858 regno, reg_type_str(env, reg->type)); in check_return_code()
15859 return -EINVAL; in check_return_code()
15866 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
15867 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || in check_return_code()
15868 env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || in check_return_code()
15869 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || in check_return_code()
15870 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || in check_return_code()
15871 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || in check_return_code()
15872 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || in check_return_code()
15873 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || in check_return_code()
15874 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) in check_return_code()
15876 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || in check_return_code()
15877 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) in check_return_code()
15881 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
15893 if (!env->prog->aux->attach_btf_id) in check_return_code()
15898 switch (env->prog->expected_attach_type) { in check_return_code()
15909 return -ENOTSUPP; in check_return_code()
15917 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { in check_return_code()
15919 if (!get_func_retval_range(env->prog, &range)) in check_return_code()
15925 } else if (!env->prog->aux->attach_func_proto->type) { in check_return_code()
15938 * depends on the to-be-replaced kernel func or bpf program. in check_return_code()
15945 if (reg->type != SCALAR_VALUE) { in check_return_code()
15947 exit_ctx, regno, reg_type_str(env, reg->type)); in check_return_code()
15948 return -EINVAL; in check_return_code()
15958 prog->expected_attach_type == BPF_LSM_CGROUP && in check_return_code()
15960 !prog->aux->attach_func_proto->type) in check_return_code()
15962 return -EINVAL; in check_return_code()
15966 tnum_in(enforce_attach_type_range, reg->var_off)) in check_return_code()
15967 env->prog->enforce_expected_attach_type = 1; in check_return_code()
15971 /* non-recursive DFS pseudo code
15972 * 1 procedure DFS-iterative(G,v):
15977 * 6 t <- S.peek()
15983 * 12 w <- G.adjacentVertex(t,e)
15985 * 14 label e as tree-edge
15990 * 19 label e as back-edge
15993 * 22 label e as forward- or cross-edge
15998 * 0x10 - discovered
15999 * 0x11 - discovered and fall-through edge labelled
16000 * 0x12 - discovered and fall-through and branch edges labelled
16001 * 0x20 - explored
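/* Illustrative walk-through (not from the original file): for
 *
 *   0: r1 = 0
 *   1: if r1 > 5 goto +1
 *   2: goto -2            // target = 2 - 2 + 1 = insn 1
 *   3: exit
 *
 * the DFS reaches insn 2 while insn 1 is still on the stack (state
 * 0x1x, discovered but not yet explored), so the edge 2 -> 1 is
 * labelled a back-edge; push_insn() then rejects the program as
 * containing a loop unless env->bpf_capable allows bounded loops.
 */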
16013 env->insn_aux_data[idx].prune_point = true; in mark_prune_point()
16018 return env->insn_aux_data[insn_idx].prune_point; in is_prune_point()
16023 env->insn_aux_data[idx].force_checkpoint = true; in mark_force_checkpoint()
16028 return env->insn_aux_data[insn_idx].force_checkpoint; in is_force_checkpoint()
16033 env->insn_aux_data[idx].calls_callback = true; in mark_calls_callback()
16038 return env->insn_aux_data[insn_idx].calls_callback; in calls_callback()
16046 /* t, w, e - match pseudo-code above:
16047 * t - index of current instruction
16048 * w - next instruction
16049 * e - edge
16053 int *insn_stack = env->cfg.insn_stack; in push_insn()
16054 int *insn_state = env->cfg.insn_state; in push_insn()
16062 if (w < 0 || w >= env->prog->len) { in push_insn()
16065 return -EINVAL; in push_insn()
16075 /* tree-edge */ in push_insn()
16078 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
16079 return -E2BIG; in push_insn()
16080 insn_stack[env->cfg.cur_stack++] = w; in push_insn()
16083 if (env->bpf_capable) in push_insn()
16087 verbose(env, "back-edge from insn %d to %d\n", t, w); in push_insn()
16088 return -EINVAL; in push_insn()
16090 /* forward- or cross-edge */ in push_insn()
16094 return -EFAULT; in push_insn()
16111 /* when we exit from subprog, we need to record non-linear history */ in visit_func_call_insn()
16122 #define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
16127 * - includes R0 if function is non-void;
16128  * - includes R1-R5 if corresponding parameter is described
16137 if (fn->ret_type != RET_VOID) in helper_fastcall_clobber_mask()
16139 for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i) in helper_fastcall_clobber_mask()
16140 if (fn->arg_type[i] != ARG_DONTCARE) in helper_fastcall_clobber_mask()
16154 return env->prog->jit_requested && bpf_jit_supports_percpu_insn(); in verifier_inlines_helper_call()
16166 vlen = btf_type_vlen(meta->func_proto); in kfunc_fastcall_clobber_mask()
16168 if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type))) in kfunc_fastcall_clobber_mask()
16178 if (meta->btf == btf_vmlinux) in is_fastcall_kfunc_call()
16179 return meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || in is_fastcall_kfunc_call()
16180 meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast]; in is_fastcall_kfunc_call()
16188 * - R0 is scratched only if function is non-void;
16189 * - R1-R5 are scratched only if corresponding parameter type is defined
16196 * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5
16199 * - as a post-processing step, clang visits each bpf_fastcall call and adds
16200 * spill/fill for every live r0-r5;
16202 * - stack offsets used for the spill/fill are allocated as lowest
16206 * - when kernel loads a program, it looks for such patterns
16210 * - if so, and if verifier or current JIT inlines the call to the
16214 * - when old kernel loads a program, presence of spill/fill pairs
16221 * *(u64 *)(r10 - 8) = r1; r1 = 1;
16222 * *(u64 *)(r10 - 16) = r2; r2 = 2;
16223 * call %[to_be_inlined] --> call %[to_be_inlined]
16224 * r2 = *(u64 *)(r10 - 16); r0 = r1;
16225 * r1 = *(u64 *)(r10 - 8); r0 += r2;
16231 * - look for such patterns;
16232 * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern;
16233  * - set env->insn_aux_data[*].fastcall_spills_num for the call instruction;
16234 * - update env->subprog_info[*]->fastcall_stack_off to find an offset
16236 * - update env->subprog_info[*]->keep_fastcall_stack.
16251 * *(u64 *)(r10 - 8) = r1; r1 = 1;
16252 * call %[to_be_inlined] --> call %[to_be_inlined]
16253 * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!!
16254 * r0 = *(u64 *)(r10 - 8); r0 += r1;
16262 struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx; in mark_fastcall_pattern_for_call()
16263 struct bpf_insn *call = &env->prog->insnsi[insn_idx]; in mark_fastcall_pattern_for_call()
16272 if (get_helper_proto(env, call->imm, &fn) < 0) in mark_fastcall_pattern_for_call()
16276 can_be_inlined = fn->allow_fastcall && in mark_fastcall_pattern_for_call()
16277 (verifier_inlines_helper_call(env, call->imm) || in mark_fastcall_pattern_for_call()
16278 bpf_jit_inlines_helper_call(call->imm)); in mark_fastcall_pattern_for_call()
16302 * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0) in mark_fastcall_pattern_for_call()
16306 * rX = *(u64 *)(r10 - Y) in mark_fastcall_pattern_for_call()
16309 if (insn_idx - i < 0 || insn_idx + i >= env->prog->len) in mark_fastcall_pattern_for_call()
16311 stx = &insns[insn_idx - i]; in mark_fastcall_pattern_for_call()
16314 if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) || in mark_fastcall_pattern_for_call()
16315 ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) || in mark_fastcall_pattern_for_call()
16316 stx->dst_reg != BPF_REG_10 || in mark_fastcall_pattern_for_call()
16317 ldx->src_reg != BPF_REG_10) in mark_fastcall_pattern_for_call()
16320 if (stx->src_reg != ldx->dst_reg) in mark_fastcall_pattern_for_call()
16323 if ((BIT(stx->src_reg) & expected_regs_mask) == 0) in mark_fastcall_pattern_for_call()
16327 * is always 8-byte aligned. in mark_fastcall_pattern_for_call()
16329 if (stx->off != off || ldx->off != off) in mark_fastcall_pattern_for_call()
16331 expected_regs_mask &= ~BIT(stx->src_reg); in mark_fastcall_pattern_for_call()
16332 env->insn_aux_data[insn_idx - i].fastcall_pattern = 1; in mark_fastcall_pattern_for_call()
16333 env->insn_aux_data[insn_idx + i].fastcall_pattern = 1; in mark_fastcall_pattern_for_call()
16342 * 1: *(u64 *)(r10 - 8) = r1 in mark_fastcall_pattern_for_call()
16344 * 3: r1 = *(u64 *)(r10 - 8) in mark_fastcall_pattern_for_call()
16345 * 4: *(u64 *)(r10 - 8) = r1 in mark_fastcall_pattern_for_call()
16347 * 6: r1 = *(u64 *)(r10 - 8) in mark_fastcall_pattern_for_call()
16355 env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1; in mark_fastcall_pattern_for_call()
16357 subprog->keep_fastcall_stack = 1; in mark_fastcall_pattern_for_call()
16358 subprog->fastcall_stack_off = min(subprog->fastcall_stack_off, off); in mark_fastcall_pattern_for_call()
16363 struct bpf_subprog_info *subprog = env->subprog_info; in mark_fastcall_patterns()
16368 for (s = 0; s < env->subprog_cnt; ++s, ++subprog) { in mark_fastcall_patterns()
16371 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in mark_fastcall_patterns()
16372 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
16373 if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) || in mark_fastcall_patterns()
16374 insn->dst_reg != BPF_REG_10) in mark_fastcall_patterns()
16376 lowest_off = min(lowest_off, insn->off); in mark_fastcall_patterns()
16379 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in mark_fastcall_patterns()
16380 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
16381 if (insn->code != (BPF_JMP | BPF_CALL)) in mark_fastcall_patterns()
16390 * < 0 - an error occurred
16391 * DONE_EXPLORING - the instruction was fully explored
16392 * KEEP_EXPLORING - there is still work to be done before it is fully explored
16396 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; in visit_insn()
16402 /* All non-branch instructions have a single fall-through edge. */ in visit_insn()
16403 if (BPF_CLASS(insn->code) != BPF_JMP && in visit_insn()
16404 BPF_CLASS(insn->code) != BPF_JMP32) { in visit_insn()
16409 switch (BPF_OP(insn->code)) { in visit_insn()
16436 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in visit_insn()
16443 * is crucial for fast convergence of open-coded iterator loop in visit_insn()
16449 * It is expected that with correct open-coded iterators in visit_insn()
16456 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); in visit_insn()
16459 if (BPF_SRC(insn->code) != BPF_K) in visit_insn()
16460 return -EINVAL; in visit_insn()
16462 if (BPF_CLASS(insn->code) == BPF_JMP) in visit_insn()
16463 off = insn->off; in visit_insn()
16465 off = insn->imm; in visit_insn()
16487 return push_insn(t, t + insn->off + 1, BRANCH, env); in visit_insn()
16491 /* non-recursive depth-first-search to detect loops in BPF program
16492 * loop == back-edge in directed graph
16496 int insn_cnt = env->prog->len; in check_cfg()
16501 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
16503 return -ENOMEM; in check_cfg()
16505 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
16508 return -ENOMEM; in check_cfg()
16513 env->cfg.cur_stack = 1; in check_cfg()
16516 while (env->cfg.cur_stack > 0) { in check_cfg()
16517 int t = insn_stack[env->cfg.cur_stack - 1]; in check_cfg()
16523 env->cfg.cur_stack--; in check_cfg()
16530 ret = -EFAULT; in check_cfg()
16536 if (env->cfg.cur_stack < 0) { in check_cfg()
16538 ret = -EFAULT; in check_cfg()
16542 if (env->exception_callback_subprog && !ex_done) { in check_cfg()
16543 ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start; in check_cfg()
16547 env->cfg.cur_stack = 1; in check_cfg()
16553 struct bpf_insn *insn = &env->prog->insnsi[i]; in check_cfg()
16557 ret = -EINVAL; in check_cfg()
16563 ret = -EINVAL; in check_cfg()
16574 env->cfg.insn_state = env->cfg.insn_stack = NULL; in check_cfg()
16582 for (i = 1; i < env->subprog_cnt; i++) { in check_abnormal_return()
16583 if (env->subprog_info[i].has_ld_abs) { in check_abnormal_return()
16585 return -EINVAL; in check_abnormal_return()
16587 if (env->subprog_info[i].has_tail_call) { in check_abnormal_return()
16589 return -EINVAL; in check_abnormal_return()
16611 int ret = -ENOMEM; in check_btf_func_early()
16613 nfuncs = attr->func_info_cnt; in check_btf_func_early()
16616 return -EINVAL; in check_btf_func_early()
16620 urec_size = attr->func_info_rec_size; in check_btf_func_early()
16625 return -EINVAL; in check_btf_func_early()
16628 prog = env->prog; in check_btf_func_early()
16629 btf = prog->aux->btf; in check_btf_func_early()
16631 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func_early()
16636 return -ENOMEM; in check_btf_func_early()
16641 if (ret == -E2BIG) { in check_btf_func_early()
16649 ret = -EFAULT; in check_btf_func_early()
16655 ret = -EFAULT; in check_btf_func_early()
16660 ret = -EINVAL; in check_btf_func_early()
16683 func_proto = btf_type_by_id(btf, type->type); in check_btf_func_early()
16692 prog->aux->func_info = krecord; in check_btf_func_early()
16693 prog->aux->func_info_cnt = nfuncs; in check_btf_func_early()
16713 int ret = -ENOMEM; in check_btf_func()
16715 nfuncs = attr->func_info_cnt; in check_btf_func()
16718 return -EINVAL; in check_btf_func()
16721 if (nfuncs != env->subprog_cnt) { in check_btf_func()
16723 return -EINVAL; in check_btf_func()
16726 urec_size = attr->func_info_rec_size; in check_btf_func()
16728 prog = env->prog; in check_btf_func()
16729 btf = prog->aux->btf; in check_btf_func()
16731 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func()
16733 krecord = prog->aux->func_info; in check_btf_func()
16736 return -ENOMEM; in check_btf_func()
16740 ret = -EINVAL; in check_btf_func()
16742 if (env->subprog_info[i].start != krecord[i].insn_off) { in check_btf_func()
16749 info_aux[i].linkage = BTF_INFO_VLEN(type->info); in check_btf_func()
16751 func_proto = btf_type_by_id(btf, type->type); in check_btf_func()
16753 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); in check_btf_func()
16756 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { in check_btf_func()
16760 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { in check_btf_func()
16768 prog->aux->func_info_aux = info_aux; in check_btf_func()
16778 struct bpf_prog_aux *aux = env->prog->aux; in adjust_btf_func()
16781 if (!aux->func_info) in adjust_btf_func()
16785 for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++) in adjust_btf_func()
16786 aux->func_info[i].insn_off = env->subprog_info[i].start; in adjust_btf_func()
16804 nr_linfo = attr->line_info_cnt; in check_btf_line()
16808 return -EINVAL; in check_btf_line()
16810 rec_size = attr->line_info_rec_size; in check_btf_line()
16813 rec_size & (sizeof(u32) - 1)) in check_btf_line()
16814 return -EINVAL; in check_btf_line()
16822 return -ENOMEM; in check_btf_line()
16824 prog = env->prog; in check_btf_line()
16825 btf = prog->aux->btf; in check_btf_line()
16828 sub = env->subprog_info; in check_btf_line()
16829 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); in check_btf_line()
16835 if (err == -E2BIG) { in check_btf_line()
16840 err = -EFAULT; in check_btf_line()
16846 err = -EFAULT; in check_btf_line()
16853 * 2) bounded by prog->len in check_btf_line()
16862 linfo[i].insn_off >= prog->len) { in check_btf_line()
16863 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", in check_btf_line()
16865 prog->len); in check_btf_line()
16866 err = -EINVAL; in check_btf_line()
16870 if (!prog->insnsi[linfo[i].insn_off].code) { in check_btf_line()
16874 err = -EINVAL; in check_btf_line()
16881 err = -EINVAL; in check_btf_line()
16885 if (s != env->subprog_cnt) { in check_btf_line()
16891 err = -EINVAL; in check_btf_line()
16900 if (s != env->subprog_cnt) { in check_btf_line()
16902 env->subprog_cnt - s, s); in check_btf_line()
16903 err = -EINVAL; in check_btf_line()
16907 prog->aux->linfo = linfo; in check_btf_line()
16908 prog->aux->nr_linfo = nr_linfo; in check_btf_line()
16926 struct bpf_prog *prog = env->prog; in check_core_relo()
16927 const struct btf *btf = prog->aux->btf; in check_core_relo()
16929 .log = &env->log, in check_core_relo()
16935 nr_core_relo = attr->core_relo_cnt; in check_core_relo()
16939 return -EINVAL; in check_core_relo()
16941 rec_size = attr->core_relo_rec_size; in check_core_relo()
16945 return -EINVAL; in check_core_relo()
16947 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); in check_core_relo()
16951 /* Unlike func_info and line_info, copy and apply each CO-RE in check_core_relo()
16958 if (err == -E2BIG) { in check_core_relo()
16963 err = -EFAULT; in check_core_relo()
16969 err = -EFAULT; in check_core_relo()
16973 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { in check_core_relo()
16974 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", in check_core_relo()
16975 i, core_relo.insn_off, prog->len); in check_core_relo()
16976 err = -EINVAL; in check_core_relo()
16981 &prog->insnsi[core_relo.insn_off / 8]); in check_core_relo()
16996 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info_early()
16998 return -EINVAL; in check_btf_info_early()
17002 btf = btf_get_by_fd(attr->prog_btf_fd); in check_btf_info_early()
17007 return -EACCES; in check_btf_info_early()
17009 env->prog->aux->btf = btf; in check_btf_info_early()
17023 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info()
17025 return -EINVAL; in check_btf_info()
17048 return old->umin_value <= cur->umin_value && in range_within()
17049 old->umax_value >= cur->umax_value && in range_within()
17050 old->smin_value <= cur->smin_value && in range_within()
17051 old->smax_value >= cur->smax_value && in range_within()
17052 old->u32_min_value <= cur->u32_min_value && in range_within()
17053 old->u32_max_value >= cur->u32_max_value && in range_within()
17054 old->s32_min_value <= cur->s32_min_value && in range_within()
17055 old->s32_max_value >= cur->s32_max_value; in range_within()
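/* Illustrative example (not from the original file): range_within()
 * asks whether the already-verified (old) range contains the current
 * one, e.g. old = [0, 100] contains cur = [10, 20]; whatever was safe
 * for the wider old range is also safe for cur, allowing the current
 * path to be pruned.
 */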
17070 struct bpf_id_pair *map = idmap->map; in check_ids()
17103 old_id = old_id ? old_id : ++idmap->tmp_id_gen; in check_scalar_ids()
17104 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; in check_scalar_ids()
17116 live = st->regs[i].live; in clean_func_state()
17118 st->regs[i].live |= REG_LIVE_DONE; in clean_func_state()
17123 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
17126 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
17127 live = st->stack[i].spilled_ptr.live; in clean_func_state()
17129 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; in clean_func_state()
17131 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); in clean_func_state()
17133 st->stack[i].slot_type[j] = STACK_INVALID; in clean_func_state()
17143 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) in clean_verifier_state()
17147 for (i = 0; i <= st->curframe; i++) in clean_verifier_state()
17148 clean_func_state(env, st->frame[i]); in clean_verifier_state()
17190 if (sl->state.branches) in clean_live_states()
17192 if (sl->state.insn_idx != insn || in clean_live_states()
17193 !same_callsites(&sl->state, cur)) in clean_live_states()
17195 clean_verifier_state(env, &sl->state); in clean_live_states()
17197 sl = sl->next; in clean_live_states()
17206 check_ids(rold->id, rcur->id, idmap) && in regs_exact()
17207 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regs_exact()
17224 if (!(rold->live & REG_LIVE_READ) && exact == NOT_EXACT) in regsafe()
17227 if (rold->type == NOT_INIT) { in regsafe()
17228 if (exact == NOT_EXACT || rcur->type == NOT_INIT) in regsafe()
17250 * a non-MAYBE_NULL variant. in regsafe()
17252 * non-MAYBE_NULL registers as well. in regsafe()
17254 if (rold->type != rcur->type) in regsafe()
17257 switch (base_type(rold->type)) { in regsafe()
17259 if (env->explore_alu_limits) { in regsafe()
17264 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
17266 if (!rold->precise && exact == NOT_EXACT) in regsafe()
17268 if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) in regsafe()
17270 if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off)) in regsafe()
17282 * First verification path is [1-6]: in regsafe()
17283 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; in regsafe()
17284 * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark in regsafe()
17286 * Next verification path is [1-4, 6]. in regsafe()
17289 * I. r6{.id=b}, r7{.id=b} via path 1-6; in regsafe()
17290 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. in regsafe()
17293 * --- in regsafe()
17297 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
17298 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
17309 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
17310 check_ids(rold->id, rcur->id, idmap) && in regsafe()
17311 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regsafe()
17317 * since someone could have accessed through (ptr - k), or in regsafe()
17318 * even done ptr -= k in a register, to get a safe access. in regsafe()
17320 if (rold->range > rcur->range) in regsafe()
17325 if (rold->off != rcur->off) in regsafe()
17328 if (!check_ids(rold->id, rcur->id, idmap)) in regsafe()
17332 tnum_in(rold->var_off, rcur->var_off); in regsafe()
17335 * the same stack frame, since fp-8 in foo != fp-8 in bar in regsafe()
17337 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; in regsafe()
17360 for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) { in is_stack_all_misc()
17361 if ((stack->slot_type[i] == STACK_MISC) || in is_stack_all_misc()
17362 (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack)) in is_stack_all_misc()
17374 return &stack->spilled_ptr; in scalar_reg_for_stack()
17392 for (i = 0; i < old->allocated_stack; i++) { in stacksafe()
17398 (i >= cur->allocated_stack || in stacksafe()
17399 old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
17400 cur->stack[spi].slot_type[i % BPF_REG_SIZE])) in stacksafe()
17403 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) in stacksafe()
17405 i += BPF_REG_SIZE - 1; in stacksafe()
17410 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
17413 if (env->allow_uninit_stack && in stacksafe()
17414 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) in stacksafe()
17420 if (i >= cur->allocated_stack) in stacksafe()
17423 /* 64-bit scalar spill vs all slots MISC and vice versa. in stacksafe()
17428 old_reg = scalar_reg_for_stack(env, &old->stack[spi]); in stacksafe()
17429 cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]); in stacksafe()
17433 i += BPF_REG_SIZE - 1; in stacksafe()
17438 * it will be safe with zero-initialized stack. in stacksafe()
17441 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
17442 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
17444 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
17445 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
17447 * this stack slot, but current has STACK_MISC -> in stacksafe()
17452 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
17455 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
17461 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} in stacksafe()
17463 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} in stacksafe()
17467 if (!regsafe(env, &old->stack[spi].spilled_ptr, in stacksafe()
17468 &cur->stack[spi].spilled_ptr, idmap, exact)) in stacksafe()
17472 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
17473 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
17474 if (old_reg->dynptr.type != cur_reg->dynptr.type || in stacksafe()
17475 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || in stacksafe()
17476 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
17480 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
17481 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
17488 if (old_reg->iter.btf != cur_reg->iter.btf || in stacksafe()
17489 old_reg->iter.btf_id != cur_reg->iter.btf_id || in stacksafe()
17490 old_reg->iter.state != cur_reg->iter.state || in stacksafe()
17491 /* ignore {old_reg,cur_reg}->iter.depth, see above */ in stacksafe()
17492 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
17512 if (old->acquired_refs != cur->acquired_refs) in refsafe()
17515 for (i = 0; i < old->acquired_refs; i++) { in refsafe()
17516 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap)) in refsafe()
17554 if (old->callback_depth > cur->callback_depth) in func_states_equal()
17558 if (!regsafe(env, &old->regs[i], &cur->regs[i], in func_states_equal()
17559 &env->idmap_scratch, exact)) in func_states_equal()
17562 if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) in func_states_equal()
17565 if (!refsafe(old, cur, &env->idmap_scratch)) in func_states_equal()
17573 env->idmap_scratch.tmp_id_gen = env->id_gen; in reset_idmap_scratch()
17574 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); in reset_idmap_scratch()
17584 if (old->curframe != cur->curframe) in states_equal()
17590 * must never prune a non-speculative execution one. in states_equal()
17592 if (old->speculative && !cur->speculative) in states_equal()
17595 if (old->active_lock.ptr != cur->active_lock.ptr) in states_equal()
17601 if (!!old->active_lock.id != !!cur->active_lock.id) in states_equal()
17604 if (old->active_lock.id && in states_equal()
17605 !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch)) in states_equal()
17608 if (old->active_rcu_lock != cur->active_rcu_lock) in states_equal()
17611 if (old->active_preempt_lock != cur->active_preempt_lock) in states_equal()
17614 if (old->in_sleepable != cur->in_sleepable) in states_equal()
17620 for (i = 0; i <= old->curframe; i++) { in states_equal()
17621 if (old->frame[i]->callsite != cur->frame[i]->callsite) in states_equal()
17623 if (!func_states_equal(env, old->frame[i], cur->frame[i], exact)) in states_equal()
17636 u8 parent_flag = parent_reg->live & REG_LIVE_READ; in propagate_liveness_reg()
17637 u8 flag = reg->live & REG_LIVE_READ; in propagate_liveness_reg()
17659 * straight-line code between a state and its parent. When we arrive at an
17660 * equivalent state (jump target or such) we didn't arrive by the straight-line
17662 * of the state's write marks. That's what 'parent == state->parent' comparison
17673 if (vparent->curframe != vstate->curframe) { in propagate_liveness()
17675 vparent->curframe, vstate->curframe); in propagate_liveness()
17676 return -EFAULT; in propagate_liveness()
17680 for (frame = 0; frame <= vstate->curframe; frame++) { in propagate_liveness()
17681 parent = vparent->frame[frame]; in propagate_liveness()
17682 state = vstate->frame[frame]; in propagate_liveness()
17683 parent_reg = parent->regs; in propagate_liveness()
17684 state_reg = state->regs; in propagate_liveness()
17685 /* We don't need to worry about FP liveness, it's read-only */ in propagate_liveness()
17686 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { in propagate_liveness()
17696 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
17697 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
17698 parent_reg = &parent->stack[i].spilled_ptr; in propagate_liveness()
17699 state_reg = &state->stack[i].spilled_ptr; in propagate_liveness()
17720 for (fr = old->curframe; fr >= 0; fr--) { in propagate_precision()
17721 state = old->frame[fr]; in propagate_precision()
17722 state_reg = state->regs; in propagate_precision()
17725 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
17726 !state_reg->precise || in propagate_precision()
17727 !(state_reg->live & REG_LIVE_READ)) in propagate_precision()
17729 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
17735 bt_set_frame_reg(&env->bt, fr, i); in propagate_precision()
17739 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
17740 if (!is_spilled_reg(&state->stack[i])) in propagate_precision()
17742 state_reg = &state->stack[i].spilled_ptr; in propagate_precision()
17743 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
17744 !state_reg->precise || in propagate_precision()
17745 !(state_reg->live & REG_LIVE_READ)) in propagate_precision()
17747 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
17750 fr, (-i - 1) * BPF_REG_SIZE); in propagate_precision()
17752 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
17754 bt_set_frame_slot(&env->bt, fr, i); in propagate_precision()
17772 int i, fr = cur->curframe; in states_maybe_looping()
17774 if (old->curframe != fr) in states_maybe_looping()
17777 fold = old->frame[fr]; in states_maybe_looping()
17778 fcur = cur->frame[fr]; in states_maybe_looping()
17780 if (memcmp(&fold->regs[i], &fcur->regs[i], in states_maybe_looping()
17788 return env->insn_aux_data[insn_idx].is_iter_next; in is_iter_next_insn()
17798 * Here's a situation in pseudo-BPF assembly form:
17818 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
17825 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
17845 * while (x--) {} // <<-- infinite loop here
17855 for (fr = old->curframe; fr >= 0; fr--) { in iter_active_depths_differ()
17856 state = old->frame[fr]; in iter_active_depths_differ()
17857 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in iter_active_depths_differ()
17858 if (state->stack[i].slot_type[0] != STACK_ITER) in iter_active_depths_differ()
17861 slot = &state->stack[i].spilled_ptr; in iter_active_depths_differ()
17862 if (slot->iter.state != BPF_ITER_STATE_ACTIVE) in iter_active_depths_differ()
17865 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr; in iter_active_depths_differ()
17866 if (cur_slot->iter.depth != slot->iter.depth) in iter_active_depths_differ()
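/* Minimal sketch (assumed numbers) of what the depth comparison above
 * catches: two visits to the same bpf_iter_num_next() call site where
 *   old: slot->iter.depth == 1
 *   cur: slot->iter.depth == 2
 * differ only in how many ACTIVE iterations have been simulated so far,
 * i.e. the loop body is still being unrolled, and such states must not
 * be treated as evidence of an infinite loop on their own.
 */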
17877 struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; in is_state_visited()
17881 force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) || in is_state_visited()
17883 cur->jmp_history_cnt > 40; in is_state_visited()
17886 * http://vger.kernel.org/bpfconf2019.html#session-1 in is_state_visited()
17894 if (env->jmps_processed - env->prev_jmps_processed >= 2 && in is_state_visited()
17895 env->insn_processed - env->prev_insn_processed >= 8) in is_state_visited()
17905 if (sl->state.insn_idx != insn_idx) in is_state_visited()
17908 if (sl->state.branches) { in is_state_visited()
17909 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; in is_state_visited()
17911 if (frame->in_async_callback_fn && in is_state_visited()
17912 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { in is_state_visited()
17926 /* BPF open-coded iterators loop detection is special. in is_state_visited()
17942 * 1. r7 = -16 in is_state_visited()
17944 * 3. while (bpf_iter_num_next(&fp[-8])) { in is_state_visited()
17946 * 5. r7 = -32 in is_state_visited()
17956 * Here verifier would first visit path 1-3, create a checkpoint at 3 in is_state_visited()
17957 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does in is_state_visited()
17959 * comparison would discard current state with r7=-32 in is_state_visited()
17963 if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) { in is_state_visited()
17968 cur_frame = cur->frame[cur->curframe]; in is_state_visited()
17972 iter_reg = &cur_frame->regs[BPF_REG_1]; in is_state_visited()
17975 * no need for extra (re-)validations in is_state_visited()
17977 spi = __get_spi(iter_reg->off + iter_reg->var_off.value); in is_state_visited()
17978 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; in is_state_visited()
17979 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) { in is_state_visited()
17980 update_loop_entry(cur, &sl->state); in is_state_visited()
17987 if (sl->state.may_goto_depth != cur->may_goto_depth && in is_state_visited()
17988 states_equal(env, &sl->state, cur, RANGE_WITHIN)) { in is_state_visited()
17989 update_loop_entry(cur, &sl->state); in is_state_visited()
17994 if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) in is_state_visited()
17999 if (states_maybe_looping(&sl->state, cur) && in is_state_visited()
18000 states_equal(env, &sl->state, cur, EXACT) && in is_state_visited()
18001 !iter_active_depths_differ(&sl->state, cur) && in is_state_visited()
18002 sl->state.may_goto_depth == cur->may_goto_depth && in is_state_visited()
18003 sl->state.callback_unroll_depth == cur->callback_unroll_depth) { in is_state_visited()
18007 print_verifier_state(env, cur->frame[cur->curframe], true); in is_state_visited()
18009 print_verifier_state(env, sl->state.frame[cur->curframe], true); in is_state_visited()
18010 return -EINVAL; in is_state_visited()
18019 * if r1 < 1000000 goto pc-2 in is_state_visited()
18026 env->jmps_processed - env->prev_jmps_processed < 20 && in is_state_visited()
18027 env->insn_processed - env->prev_insn_processed < 100) in is_state_visited()
18031 /* If sl->state is a part of a loop and this loop's entry is a part of in is_state_visited()
18038 * .---------> hdr All branches from 'succ' had been explored in is_state_visited()
18041 * | .------... Suppose states 'cur' and 'succ' correspond in is_state_visited()
18047 * | succ <- cur To check if that is the case, verify in is_state_visited()
18052 * '----' in is_state_visited()
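/* Informal restatement of the diagram: if get_loop_entry() returns a
 * loop header that still has branches > 0, that header is on the
 * current DFS path, so 'cur' and the explored state belong to the same
 * loop and the comparison below has to use RANGE_WITHIN (ranges of
 * 'cur' contained in the explored state) rather than the usual
 * NOT_EXACT liveness/precision based pruning.
 */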
18056 loop_entry = get_loop_entry(&sl->state); in is_state_visited()
18057 force_exact = loop_entry && loop_entry->branches > 0; in is_state_visited()
18058 if (states_equal(env, &sl->state, cur, force_exact ? RANGE_WITHIN : NOT_EXACT)) { in is_state_visited()
18062 sl->hit_cnt++; in is_state_visited()
18066 * If we have any write marks in env->cur_state, they in is_state_visited()
18073 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
18080 if (is_jmp_point(env, env->insn_idx)) in is_state_visited()
18082 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
18095 sl->miss_cnt++; in is_state_visited()
18104 n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; in is_state_visited()
18105 if (sl->miss_cnt > sl->hit_cnt * n + n) { in is_state_visited()
18109 *pprev = sl->next; in is_state_visited()
18110 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE && in is_state_visited()
18111 !sl->state.used_as_loop_entry) { in is_state_visited()
18112 u32 br = sl->state.branches; in is_state_visited()
18117 free_verifier_state(&sl->state, false); in is_state_visited()
18119 env->peak_states--; in is_state_visited()
18125 sl->next = env->free_list; in is_state_visited()
18126 env->free_list = sl; in is_state_visited()
18132 pprev = &sl->next; in is_state_visited()
18136 if (env->max_states_per_insn < states_cnt) in is_state_visited()
18137 env->max_states_per_insn = states_cnt; in is_state_visited()
18139 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) in is_state_visited()
18151 * When looping the sl->state.branches will be > 0 and this state in is_state_visited()
18156 return -ENOMEM; in is_state_visited()
18157 env->total_states++; in is_state_visited()
18158 env->peak_states++; in is_state_visited()
18159 env->prev_jmps_processed = env->jmps_processed; in is_state_visited()
18160 env->prev_insn_processed = env->insn_processed; in is_state_visited()
18163 if (env->bpf_capable) in is_state_visited()
18167 new = &new_sl->state; in is_state_visited()
18174 new->insn_idx = insn_idx; in is_state_visited()
18175 WARN_ONCE(new->branches != 1, in is_state_visited()
18176 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); in is_state_visited()
18178 cur->parent = new; in is_state_visited()
18179 cur->first_insn_idx = insn_idx; in is_state_visited()
18180 cur->dfs_depth = new->dfs_depth + 1; in is_state_visited()
18182 new_sl->next = *explored_state(env, insn_idx); in is_state_visited()
18185 * registers connected. Only r6 - r9 of the callers are alive (pushed in is_state_visited()
18187 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to in is_state_visited()
18197 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
18198 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) in is_state_visited()
18199 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; in is_state_visited()
18201 cur->frame[j]->regs[i].live = REG_LIVE_NONE; in is_state_visited()
18205 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
18206 struct bpf_func_state *frame = cur->frame[j]; in is_state_visited()
18207 struct bpf_func_state *newframe = new->frame[j]; in is_state_visited()
18209 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
18210 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; in is_state_visited()
18211 frame->stack[i].spilled_ptr.parent = in is_state_visited()
18212 &newframe->stack[i].spilled_ptr; in is_state_visited()
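/* Sketch of the effect of the parent linking above (hypothetical): once
 * cur->frame[j]->regs[i].parent points into the state that was just
 * added to the explored list, a later
 *   r0 = r6
 * on the continued path calls mark_reg_read() and the REG_LIVE_READ
 * mark bubbles up into the stored state, which is what the liveness
 * based pruning in this function relies on.
 */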
18256 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; in save_aux_ptr_type()
18284 return -EINVAL; in save_aux_ptr_type()
18293 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check()
18294 struct bpf_verifier_state *state = env->cur_state; in do_check()
18295 struct bpf_insn *insns = env->prog->insnsi; in do_check()
18297 int insn_cnt = env->prog->len; in do_check()
18299 int prev_insn_idx = -1; in do_check()
18308 env->cur_hist_ent = NULL; in do_check()
18310 env->prev_insn_idx = prev_insn_idx; in do_check()
18311 if (env->insn_idx >= insn_cnt) { in do_check()
18313 env->insn_idx, insn_cnt); in do_check()
18314 return -EFAULT; in do_check()
18317 insn = &insns[env->insn_idx]; in do_check()
18318 class = BPF_CLASS(insn->code); in do_check()
18320 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { in do_check()
18323 env->insn_processed); in do_check()
18324 return -E2BIG; in do_check()
18327 state->last_insn_idx = env->prev_insn_idx; in do_check()
18329 if (is_prune_point(env, env->insn_idx)) { in do_check()
18330 err = is_state_visited(env, env->insn_idx); in do_check()
18335 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
18338 env->prev_insn_idx, env->insn_idx, in do_check()
18339 env->cur_state->speculative ? in do_check()
18342 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
18348 if (is_jmp_point(env, env->insn_idx)) { in do_check()
18355 return -EAGAIN; in do_check()
18360 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { in do_check()
18362 env->prev_insn_idx, env->insn_idx, in do_check()
18363 env->cur_state->speculative ? in do_check()
18365 print_verifier_state(env, state->frame[state->curframe], true); in do_check()
18369 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
18377 print_insn_state(env, state->frame[state->curframe]); in do_check()
18379 verbose_linfo(env, env->insn_idx, "; "); in do_check()
18380 env->prev_log_pos = env->log.end_pos; in do_check()
18381 verbose(env, "%d: ", env->insn_idx); in do_check()
18382 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in do_check()
18383 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; in do_check()
18384 env->prev_log_pos = env->log.end_pos; in do_check()
18387 if (bpf_prog_is_offloaded(env->prog->aux)) { in do_check()
18388 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
18389 env->prev_insn_idx); in do_check()
18396 prev_insn_idx = env->insn_idx; in do_check()
18409 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
18413 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in do_check()
18417 src_reg_type = regs[insn->src_reg].type; in do_check()
18422 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
18423 insn->off, BPF_SIZE(insn->code), in do_check()
18424 BPF_READ, insn->dst_reg, false, in do_check()
18425 BPF_MODE(insn->code) == BPF_MEMSX); in do_check()
18427 err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx"); in do_check()
18433 if (BPF_MODE(insn->code) == BPF_ATOMIC) { in do_check()
18434 err = check_atomic(env, env->insn_idx, insn); in do_check()
18437 env->insn_idx++; in do_check()
18441 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { in do_check()
18443 return -EINVAL; in do_check()
18447 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
18451 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
18455 dst_reg_type = regs[insn->dst_reg].type; in do_check()
18458 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
18459 insn->off, BPF_SIZE(insn->code), in do_check()
18460 BPF_WRITE, insn->src_reg, false, false); in do_check()
18470 if (BPF_MODE(insn->code) != BPF_MEM || in do_check()
18471 insn->src_reg != BPF_REG_0) { in do_check()
18473 return -EINVAL; in do_check()
18476 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
18480 dst_reg_type = regs[insn->dst_reg].type; in do_check()
18483 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
18484 insn->off, BPF_SIZE(insn->code), in do_check()
18485 BPF_WRITE, -1, false, false); in do_check()
18493 u8 opcode = BPF_OP(insn->code); in do_check()
18495 env->jmps_processed++; in do_check()
18497 if (BPF_SRC(insn->code) != BPF_K || in do_check()
18498 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL in do_check()
18499 && insn->off != 0) || in do_check()
18500 (insn->src_reg != BPF_REG_0 && in do_check()
18501 insn->src_reg != BPF_PSEUDO_CALL && in do_check()
18502 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || in do_check()
18503 insn->dst_reg != BPF_REG_0 || in do_check()
18506 return -EINVAL; in do_check()
18509 if (env->cur_state->active_lock.ptr) { in do_check()
18510 if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || in do_check()
18511 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && in do_check()
18512 (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) { in do_check()
18514 return -EINVAL; in do_check()
18517 if (insn->src_reg == BPF_PSEUDO_CALL) { in do_check()
18518 err = check_func_call(env, insn, &env->insn_idx); in do_check()
18519 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_check()
18520 err = check_kfunc_call(env, insn, &env->insn_idx); in do_check()
18526 err = check_helper_call(env, insn, &env->insn_idx); in do_check()
18533 if (BPF_SRC(insn->code) != BPF_K || in do_check()
18534 insn->src_reg != BPF_REG_0 || in do_check()
18535 insn->dst_reg != BPF_REG_0 || in do_check()
18536 (class == BPF_JMP && insn->imm != 0) || in do_check()
18537 (class == BPF_JMP32 && insn->off != 0)) { in do_check()
18539 return -EINVAL; in do_check()
18543 env->insn_idx += insn->off + 1; in do_check()
18545 env->insn_idx += insn->imm + 1; in do_check()
18549 if (BPF_SRC(insn->code) != BPF_K || in do_check()
18550 insn->imm != 0 || in do_check()
18551 insn->src_reg != BPF_REG_0 || in do_check()
18552 insn->dst_reg != BPF_REG_0 || in do_check()
18555 return -EINVAL; in do_check()
18558 if (env->cur_state->active_lock.ptr && !env->cur_state->curframe) { in do_check()
18560 return -EINVAL; in do_check()
18563 if (env->cur_state->active_rcu_lock && !env->cur_state->curframe) { in do_check()
18565 return -EINVAL; in do_check()
18568 if (env->cur_state->active_preempt_lock && !env->cur_state->curframe) { in do_check()
18570 env->cur_state->active_preempt_lock, in do_check()
18571 env->cur_state->active_preempt_lock == 1 ? " is" : "(s) are"); in do_check()
18572 return -EINVAL; in do_check()
18577 * state->curframe > 0, it may be a callback in do_check()
18598 if (state->curframe) { in do_check()
18600 err = prepare_func_exit(env, &env->insn_idx); in do_check()
18612 update_branch_counts(env, env->cur_state); in do_check()
18614 &env->insn_idx, pop_log); in do_check()
18616 if (err != -ENOENT) in do_check()
18624 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
18629 u8 mode = BPF_MODE(insn->code); in do_check()
18641 env->insn_idx++; in do_check()
18645 return -EINVAL; in do_check()
18649 return -EINVAL; in do_check()
18652 env->insn_idx++; in do_check()
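/* Informal example of the dispatch above for a single load, e.g.
 *   r0 = *(u32 *)(r1 + 0)       // BPF_LDX | BPF_MEM | BPF_W
 * check_reg_arg() verifies that the source r1 was initialized and that
 * r0 is a legal destination, check_mem_access() validates the pointer
 * type, offset and access size of the read, and
 * reg_bounds_sanity_check() asserts that the bounds derived for r0 are
 * self-consistent.
 */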
18677 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) in find_btf_percpu_datasec()
18680 tname = btf_name_by_offset(btf, t->name_off); in find_btf_percpu_datasec()
18685 return -ENOENT; in find_btf_percpu_datasec()
18699 u32 type, id = insn->imm; in check_pseudo_btf_id()
18710 return -EINVAL; in check_pseudo_btf_id()
18715 return -EINVAL; in check_pseudo_btf_id()
18724 err = -ENOENT; in check_pseudo_btf_id()
18730 err = -EINVAL; in check_pseudo_btf_id()
18734 sym_name = btf_name_by_offset(btf, t->name_off); in check_pseudo_btf_id()
18739 err = -ENOENT; in check_pseudo_btf_id()
18746 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in check_pseudo_btf_id()
18747 aux->btf_var.mem_size = 0; in check_pseudo_btf_id()
18755 if (vsi->type == id) { in check_pseudo_btf_id()
18762 type = t->type; in check_pseudo_btf_id()
18765 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; in check_pseudo_btf_id()
18766 aux->btf_var.btf = btf; in check_pseudo_btf_id()
18767 aux->btf_var.btf_id = type; in check_pseudo_btf_id()
18776 tname = btf_name_by_offset(btf, t->name_off); in check_pseudo_btf_id()
18779 err = -EINVAL; in check_pseudo_btf_id()
18782 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in check_pseudo_btf_id()
18783 aux->btf_var.mem_size = tsize; in check_pseudo_btf_id()
18785 aux->btf_var.reg_type = PTR_TO_BTF_ID; in check_pseudo_btf_id()
18786 aux->btf_var.btf = btf; in check_pseudo_btf_id()
18787 aux->btf_var.btf_id = type; in check_pseudo_btf_id()
18791 for (i = 0; i < env->used_btf_cnt; i++) { in check_pseudo_btf_id()
18792 if (env->used_btfs[i].btf == btf) { in check_pseudo_btf_id()
18798 if (env->used_btf_cnt >= MAX_USED_BTFS) { in check_pseudo_btf_id()
18799 err = -E2BIG; in check_pseudo_btf_id()
18803 btf_mod = &env->used_btfs[env->used_btf_cnt]; in check_pseudo_btf_id()
18804 btf_mod->btf = btf; in check_pseudo_btf_id()
18805 btf_mod->module = NULL; in check_pseudo_btf_id()
18809 btf_mod->module = btf_try_get_module(btf); in check_pseudo_btf_id()
18810 if (!btf_mod->module) { in check_pseudo_btf_id()
18811 err = -ENXIO; in check_pseudo_btf_id()
18816 env->used_btf_cnt++; in check_pseudo_btf_id()
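/* Hypothetical ld_imm64 handled above (variable name picked only for
 * illustration):
 *   r1 = ksym "bpf_prog_active"  // BPF_PSEUDO_BTF_ID
 * A variable living in a percpu datasec resolves to
 * PTR_TO_BTF_ID | MEM_PERCPU, a plain kernel variable of struct type
 * yields PTR_TO_BTF_ID, and a non-struct one is exposed read-only as
 * PTR_TO_MEM | MEM_RDONLY with its BTF-derived size.
 */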
18845 if (btf_record_has_field(map->record, BPF_LIST_HEAD) || in check_map_prog_compatibility()
18846 btf_record_has_field(map->record, BPF_RB_ROOT)) { in check_map_prog_compatibility()
18849 return -EINVAL; in check_map_prog_compatibility()
18853 if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in check_map_prog_compatibility()
18856 return -EINVAL; in check_map_prog_compatibility()
18861 return -EINVAL; in check_map_prog_compatibility()
18865 if (btf_record_has_field(map->record, BPF_TIMER)) { in check_map_prog_compatibility()
18868 return -EINVAL; in check_map_prog_compatibility()
18872 if (btf_record_has_field(map->record, BPF_WORKQUEUE)) { in check_map_prog_compatibility()
18875 return -EINVAL; in check_map_prog_compatibility()
18879 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && in check_map_prog_compatibility()
18882 return -EINVAL; in check_map_prog_compatibility()
18885 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in check_map_prog_compatibility()
18887 return -EINVAL; in check_map_prog_compatibility()
18890 if (prog->sleepable) in check_map_prog_compatibility()
18891 switch (map->map_type) { in check_map_prog_compatibility()
18913 return -EINVAL; in check_map_prog_compatibility()
18921 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || in bpf_map_is_cgroup_storage()
18922 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); in bpf_map_is_cgroup_storage()
18943 for (i = 0; i < env->used_map_cnt; i++) { in add_used_map_from_fd()
18944 if (env->used_maps[i] == map) { in add_used_map_from_fd()
18950 if (env->used_map_cnt >= MAX_USED_MAPS) { in add_used_map_from_fd()
18953 return -E2BIG; in add_used_map_from_fd()
18956 if (env->prog->sleepable) in add_used_map_from_fd()
18957 atomic64_inc(&map->sleepable_refcnt); in add_used_map_from_fd()
18967 env->used_maps[env->used_map_cnt++] = map; in add_used_map_from_fd()
18969 return env->used_map_cnt - 1; in add_used_map_from_fd()
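/* Illustrative note: a program that loads the same map fd in two
 * different ld_imm64 instructions ends up with a single entry in
 * env->used_maps[]; the loop above returns the index of the already
 * recorded bpf_map instead of adding a duplicate, and sleepable
 * programs additionally take a sleepable_refcnt on the map.
 */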
18981 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64()
18982 int insn_cnt = env->prog->len; in resolve_pseudo_ldimm64()
18985 err = bpf_prog_calc_tag(env->prog); in resolve_pseudo_ldimm64()
18990 if (BPF_CLASS(insn->code) == BPF_LDX && in resolve_pseudo_ldimm64()
18991 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || in resolve_pseudo_ldimm64()
18992 insn->imm != 0)) { in resolve_pseudo_ldimm64()
18994 return -EINVAL; in resolve_pseudo_ldimm64()
19005 if (i == insn_cnt - 1 || insn[1].code != 0 || in resolve_pseudo_ldimm64()
19009 return -EINVAL; in resolve_pseudo_ldimm64()
19013 /* valid generic load 64-bit imm */ in resolve_pseudo_ldimm64()
19017 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
19025 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
19026 aux->ptr_type = PTR_TO_FUNC; in resolve_pseudo_ldimm64()
19031 * converted into regular 64-bit imm load insn. in resolve_pseudo_ldimm64()
19044 return -EINVAL; in resolve_pseudo_ldimm64()
19050 if (bpfptr_is_null(env->fd_array)) { in resolve_pseudo_ldimm64()
19052 return -EPROTO; in resolve_pseudo_ldimm64()
19054 if (copy_from_bpfptr_offset(&fd, env->fd_array, in resolve_pseudo_ldimm64()
19057 return -EFAULT; in resolve_pseudo_ldimm64()
19067 map = env->used_maps[map_idx]; in resolve_pseudo_ldimm64()
19069 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
19070 aux->map_index = map_idx; in resolve_pseudo_ldimm64()
19072 err = check_map_prog_compatibility(env, map, env->prog); in resolve_pseudo_ldimm64()
19084 return -EINVAL; in resolve_pseudo_ldimm64()
19087 if (!map->ops->map_direct_value_addr) { in resolve_pseudo_ldimm64()
19089 return -EINVAL; in resolve_pseudo_ldimm64()
19092 err = map->ops->map_direct_value_addr(map, &addr, off); in resolve_pseudo_ldimm64()
19095 map->value_size, off); in resolve_pseudo_ldimm64()
19099 aux->map_off = off; in resolve_pseudo_ldimm64()
19111 bpf_cgroup_storage_assign(env->prog->aux, map)) { in resolve_pseudo_ldimm64()
19113 return -EBUSY; in resolve_pseudo_ldimm64()
19115 if (map->map_type == BPF_MAP_TYPE_ARENA) { in resolve_pseudo_ldimm64()
19116 if (env->prog->aux->arena) { in resolve_pseudo_ldimm64()
19118 return -EBUSY; in resolve_pseudo_ldimm64()
19120 if (!env->allow_ptr_leaks || !env->bpf_capable) { in resolve_pseudo_ldimm64()
19122 return -EPERM; in resolve_pseudo_ldimm64()
19124 if (!env->prog->jit_requested) { in resolve_pseudo_ldimm64()
19126 return -EOPNOTSUPP; in resolve_pseudo_ldimm64()
19130 return -EOPNOTSUPP; in resolve_pseudo_ldimm64()
19132 env->prog->aux->arena = (void *)map; in resolve_pseudo_ldimm64()
19133 if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { in resolve_pseudo_ldimm64()
19135 return -EINVAL; in resolve_pseudo_ldimm64()
19146 if (!bpf_opcode_in_insntable(insn->code)) { in resolve_pseudo_ldimm64()
19147 verbose(env, "unknown opcode %02x\n", insn->code); in resolve_pseudo_ldimm64()
19148 return -EINVAL; in resolve_pseudo_ldimm64()
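/* Hypothetical rewrite performed above for a map reference (fd value
 * assumed):
 *   before: BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, 3)
 *   after:  the 64-bit immediate holds the kernel address of the
 *           bpf_map that fd 3 referred to
 * The map is also recorded in env->used_maps[] so it stays alive as
 * long as the program does; the pseudo src_reg marker itself is cleared
 * later by convert_pseudo_ld_imm64().
 */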
19162 __bpf_free_used_maps(env->prog->aux, env->used_maps, in release_maps()
19163 env->used_map_cnt); in release_maps()
19169 __bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt); in release_btfs()
19175 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
19176 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
19180 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) in convert_pseudo_ld_imm64()
19182 if (insn->src_reg == BPF_PSEUDO_FUNC) in convert_pseudo_ld_imm64()
19184 insn->src_reg = 0; in convert_pseudo_ld_imm64()
19188 /* single env->prog->insni[off] instruction was replaced with the range
19196 struct bpf_insn_aux_data *old_data = env->insn_aux_data; in adjust_insn_aux_data()
19197 struct bpf_insn *insn = new_prog->insnsi; in adjust_insn_aux_data()
19206 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); in adjust_insn_aux_data()
19210 prog_len = new_prog->len; in adjust_insn_aux_data()
19213 memcpy(new_data + off + cnt - 1, old_data + off, in adjust_insn_aux_data()
19214 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); in adjust_insn_aux_data()
19215 for (i = off; i < off + cnt - 1; i++) { in adjust_insn_aux_data()
19220 env->insn_aux_data = new_data; in adjust_insn_aux_data()
19231 for (i = 0; i <= env->subprog_cnt; i++) { in adjust_subprog_starts()
19232 if (env->subprog_info[i].start <= off) in adjust_subprog_starts()
19234 env->subprog_info[i].start += len - 1; in adjust_subprog_starts()
19240 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; in adjust_poke_descs()
19241 int i, sz = prog->aux->size_poke_tab; in adjust_poke_descs()
19246 if (desc->insn_idx <= off) in adjust_poke_descs()
19248 desc->insn_idx += len - 1; in adjust_poke_descs()
19259 new_data = vzalloc(array_size(env->prog->len + len - 1, in bpf_patch_insn_data()
19265 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
19267 if (PTR_ERR(new_prog) == -ERANGE) in bpf_patch_insn_data()
19269 "insn %d cannot be patched due to 16-bit range\n", in bpf_patch_insn_data()
19270 env->insn_aux_data[off].orig_idx); in bpf_patch_insn_data()
19286 struct bpf_insn *insn = prog->insnsi; in adjust_jmp_off()
19287 u32 insn_cnt = prog->len, i; in adjust_jmp_off()
19292 u8 code = insn->code; in adjust_jmp_off()
19301 if (insn->code == (BPF_JMP32 | BPF_JA)) { in adjust_jmp_off()
19302 if (i + 1 + insn->imm != tgt_idx) in adjust_jmp_off()
19304 if (check_add_overflow(insn->imm, delta, &imm)) in adjust_jmp_off()
19305 return -ERANGE; in adjust_jmp_off()
19306 insn->imm = imm; in adjust_jmp_off()
19308 if (i + 1 + insn->off != tgt_idx) in adjust_jmp_off()
19310 if (check_add_overflow(insn->off, delta, &off)) in adjust_jmp_off()
19311 return -ERANGE; in adjust_jmp_off()
19312 insn->off = off; in adjust_jmp_off()
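/* In plain words (interpretation of the loop above): every jump whose
 * computed target equals 'tgt_idx' has its displacement grown by
 * 'delta' - imm for a gotol (BPF_JMP32 | BPF_JA), off for the other
 * jump forms - and -ERANGE is returned if the wider displacement no
 * longer fits its field.
 */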
19324 for (i = 0; i < env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
19325 if (env->subprog_info[i].start >= off) in adjust_subprog_starts_after_remove()
19328 for (j = i; j < env->subprog_cnt; j++) in adjust_subprog_starts_after_remove()
19329 if (env->subprog_info[j].start >= off + cnt) in adjust_subprog_starts_after_remove()
19334 if (env->subprog_info[j].start != off + cnt) in adjust_subprog_starts_after_remove()
19335 j--; in adjust_subprog_starts_after_remove()
19338 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
19342 move = env->subprog_cnt + 1 - j; in adjust_subprog_starts_after_remove()
19344 memmove(env->subprog_info + i, in adjust_subprog_starts_after_remove()
19345 env->subprog_info + j, in adjust_subprog_starts_after_remove()
19346 sizeof(*env->subprog_info) * move); in adjust_subprog_starts_after_remove()
19347 env->subprog_cnt -= j - i; in adjust_subprog_starts_after_remove()
19350 if (aux->func_info) { in adjust_subprog_starts_after_remove()
19351 move = aux->func_info_cnt - j; in adjust_subprog_starts_after_remove()
19353 memmove(aux->func_info + i, in adjust_subprog_starts_after_remove()
19354 aux->func_info + j, in adjust_subprog_starts_after_remove()
19355 sizeof(*aux->func_info) * move); in adjust_subprog_starts_after_remove()
19356 aux->func_info_cnt -= j - i; in adjust_subprog_starts_after_remove()
19357 /* func_info->insn_off is set after all code rewrites, in adjust_subprog_starts_after_remove()
19358 * in adjust_btf_func() - no need to adjust in adjust_subprog_starts_after_remove()
19363 if (env->subprog_info[i].start == off) in adjust_subprog_starts_after_remove()
19368 for (; i <= env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
19369 env->subprog_info[i].start -= cnt; in adjust_subprog_starts_after_remove()
19377 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove()
19381 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
19385 linfo = prog->aux->linfo; in bpf_adj_linfo_after_remove()
19401 * last removed linfo. prog is already modified, so prog->len == off in bpf_adj_linfo_after_remove()
19404 if (prog->len != off && l_cnt && in bpf_adj_linfo_after_remove()
19406 l_cnt--; in bpf_adj_linfo_after_remove()
19407 linfo[--i].insn_off = off + cnt; in bpf_adj_linfo_after_remove()
19413 sizeof(*linfo) * (nr_linfo - i)); in bpf_adj_linfo_after_remove()
19415 prog->aux->nr_linfo -= l_cnt; in bpf_adj_linfo_after_remove()
19416 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
19421 linfo[i].insn_off -= cnt; in bpf_adj_linfo_after_remove()
19424 for (i = 0; i <= env->subprog_cnt; i++) in bpf_adj_linfo_after_remove()
19425 if (env->subprog_info[i].linfo_idx > l_off) { in bpf_adj_linfo_after_remove()
19429 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) in bpf_adj_linfo_after_remove()
19430 env->subprog_info[i].linfo_idx -= l_cnt; in bpf_adj_linfo_after_remove()
19432 env->subprog_info[i].linfo_idx = l_off; in bpf_adj_linfo_after_remove()
19440 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in verifier_remove_insns()
19441 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
19444 if (bpf_prog_is_offloaded(env->prog->aux)) in verifier_remove_insns()
19447 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
19460 sizeof(*aux_data) * (orig_prog_len - off - cnt)); in verifier_remove_insns()
19467 * have dead code too. Therefore replace all dead at-run-time code
19468 * with 'ja -1'.
19478 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in sanitize_dead_code()
19479 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); in sanitize_dead_code()
19480 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
19481 const int insn_cnt = env->prog->len; in sanitize_dead_code()
19508 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_hard_wire_dead_code_branches()
19510 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
19511 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
19515 if (!insn_is_cond_jump(insn->code)) in opt_hard_wire_dead_code_branches()
19519 ja.off = insn->off; in opt_hard_wire_dead_code_branches()
19520 else if (!aux_data[i + 1 + insn->off].seen) in opt_hard_wire_dead_code_branches()
19525 if (bpf_prog_is_offloaded(env->prog->aux)) in opt_hard_wire_dead_code_branches()
19534 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_remove_dead_code()
19535 int insn_cnt = env->prog->len; in opt_remove_dead_code()
19550 insn_cnt = env->prog->len; in opt_remove_dead_code()
19561 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
19562 int insn_cnt = env->prog->len; in opt_remove_nops()
19572 insn_cnt--; in opt_remove_nops()
19573 i--; in opt_remove_nops()
19583 struct bpf_insn_aux_data *aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
19584 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
19585 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
19589 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; in opt_subreg_zext_lo32_rnd_hi32()
19610 if (load_reg == -1) in opt_subreg_zext_lo32_rnd_hi32()
19638 /* Add in a zero-extend instruction if a) the JIT has requested in opt_subreg_zext_lo32_rnd_hi32()
19642 * R0, therefore always zero-extends. However some archs' in opt_subreg_zext_lo32_rnd_hi32()
19645 * orthogonal to the general zero-extension behaviour of the in opt_subreg_zext_lo32_rnd_hi32()
19651 /* Zero-extension is done by the caller. */ in opt_subreg_zext_lo32_rnd_hi32()
19655 if (WARN_ON(load_reg == -1)) { in opt_subreg_zext_lo32_rnd_hi32()
19657 return -EFAULT; in opt_subreg_zext_lo32_rnd_hi32()
19668 return -ENOMEM; in opt_subreg_zext_lo32_rnd_hi32()
19669 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
19670 insns = new_prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
19671 aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
19672 delta += patch_len - 1; in opt_subreg_zext_lo32_rnd_hi32()
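/* Hypothetical patch produced by the pass above when the JIT asks for
 * explicit zero extension of a 32-bit definition:
 *   before: w6 = w1                // 32-bit mov defines a subregister
 *   after:  w6 = w1
 *           r6 = (u32)r6           // BPF_ZEXT_REG(r6) inserted
 * Under BPF_F_TEST_RND_HI32 the inserted sequence instead ORs a random
 * constant into the upper half, to flush out programs relying on
 * implicit zero extension.
 */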
19680 * struct __sk_buff -> struct sk_buff
19681 * struct bpf_sock_ops -> struct sock
19685 struct bpf_subprog_info *subprogs = env->subprog_info; in convert_ctx_accesses()
19686 const struct bpf_verifier_ops *ops = env->ops; in convert_ctx_accesses()
19688 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
19689 struct bpf_insn *epilogue_buf = env->epilogue_buf; in convert_ctx_accesses()
19690 struct bpf_insn *insn_buf = env->insn_buf; in convert_ctx_accesses()
19698 if (ops->gen_epilogue) { in convert_ctx_accesses()
19699 epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog, in convert_ctx_accesses()
19700 -(subprogs[0].stack_depth + 8)); in convert_ctx_accesses()
19703 return -EINVAL; in convert_ctx_accesses()
19709 -subprogs[0].stack_depth); in convert_ctx_accesses()
19710 insn_buf[cnt++] = env->prog->insnsi[0]; in convert_ctx_accesses()
19713 return -ENOMEM; in convert_ctx_accesses()
19714 env->prog = new_prog; in convert_ctx_accesses()
19715 delta += cnt - 1; in convert_ctx_accesses()
19719 if (ops->gen_prologue || env->seen_direct_write) { in convert_ctx_accesses()
19720 if (!ops->gen_prologue) { in convert_ctx_accesses()
19722 return -EINVAL; in convert_ctx_accesses()
19724 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, in convert_ctx_accesses()
19725 env->prog); in convert_ctx_accesses()
19728 return -EINVAL; in convert_ctx_accesses()
19732 return -ENOMEM; in convert_ctx_accesses()
19734 env->prog = new_prog; in convert_ctx_accesses()
19735 delta += cnt - 1; in convert_ctx_accesses()
19740 WARN_ON(adjust_jmp_off(env->prog, 0, delta)); in convert_ctx_accesses()
19742 if (bpf_prog_is_offloaded(env->prog->aux)) in convert_ctx_accesses()
19745 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
19751 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
19752 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
19753 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
19754 insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
19755 insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || in convert_ctx_accesses()
19756 insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || in convert_ctx_accesses()
19757 insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { in convert_ctx_accesses()
19759 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
19760 insn->code == (BPF_STX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
19761 insn->code == (BPF_STX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
19762 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
19763 insn->code == (BPF_ST | BPF_MEM | BPF_B) || in convert_ctx_accesses()
19764 insn->code == (BPF_ST | BPF_MEM | BPF_H) || in convert_ctx_accesses()
19765 insn->code == (BPF_ST | BPF_MEM | BPF_W) || in convert_ctx_accesses()
19766 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { in convert_ctx_accesses()
19768 } else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) || in convert_ctx_accesses()
19769 insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) && in convert_ctx_accesses()
19770 env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) { in convert_ctx_accesses()
19771 insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code); in convert_ctx_accesses()
19772 env->prog->aux->num_exentries++; in convert_ctx_accesses()
19774 } else if (insn->code == (BPF_JMP | BPF_EXIT) && in convert_ctx_accesses()
19780 insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1); in convert_ctx_accesses()
19798 env->insn_aux_data[i + delta].sanitize_stack_spill) { in convert_ctx_accesses()
19807 return -ENOMEM; in convert_ctx_accesses()
19809 delta += cnt - 1; in convert_ctx_accesses()
19810 env->prog = new_prog; in convert_ctx_accesses()
19811 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
19815 switch ((int)env->insn_aux_data[i + delta].ptr_type) { in convert_ctx_accesses()
19817 if (!ops->convert_ctx_access) in convert_ctx_accesses()
19819 convert_ctx_access = ops->convert_ctx_access; in convert_ctx_accesses()
19841 if (BPF_MODE(insn->code) == BPF_MEM) in convert_ctx_accesses()
19842 insn->code = BPF_LDX | BPF_PROBE_MEM | in convert_ctx_accesses()
19843 BPF_SIZE((insn)->code); in convert_ctx_accesses()
19845 insn->code = BPF_LDX | BPF_PROBE_MEMSX | in convert_ctx_accesses()
19846 BPF_SIZE((insn)->code); in convert_ctx_accesses()
19847 env->prog->aux->num_exentries++; in convert_ctx_accesses()
19851 if (BPF_MODE(insn->code) == BPF_MEMSX) { in convert_ctx_accesses()
19853 return -EOPNOTSUPP; in convert_ctx_accesses()
19855 insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code); in convert_ctx_accesses()
19856 env->prog->aux->num_exentries++; in convert_ctx_accesses()
19862 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; in convert_ctx_accesses()
19864 mode = BPF_MODE(insn->code); in convert_ctx_accesses()
19867 * convert to a 4/8-byte load, to minimize program-type-specific in convert_ctx_accesses()
19868 * convert_ctx_access changes. If conversion is successful, in convert_ctx_accesses()
19873 off = insn->off; in convert_ctx_accesses()
19879 return -EINVAL; in convert_ctx_accesses()
19888 insn->off = off & ~(size_default - 1); in convert_ctx_accesses()
19889 insn->code = BPF_LDX | BPF_MEM | size_code; in convert_ctx_accesses()
19893 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
19898 return -EINVAL; in convert_ctx_accesses()
19906 return -EINVAL; in convert_ctx_accesses()
19911 insn->dst_reg, in convert_ctx_accesses()
19913 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
19914 (1 << size * 8) - 1); in convert_ctx_accesses()
19918 insn->dst_reg, in convert_ctx_accesses()
19920 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
19921 (1ULL << size * 8) - 1); in convert_ctx_accesses()
19926 insn->dst_reg, insn->dst_reg, in convert_ctx_accesses()
19932 return -ENOMEM; in convert_ctx_accesses()
19934 delta += cnt - 1; in convert_ctx_accesses()
19937 env->prog = new_prog; in convert_ctx_accesses()
19938 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
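/* Hypothetical rewrite done by this pass for a socket buffer program
 * (field name real, register layout illustrative):
 *   before: r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 *   after:  r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len))
 * The access to the UAPI context struct is turned by the program
 * type's convert_ctx_access() callback into one or more loads from the
 * real kernel object, with narrow loads widened to the field size and
 * masked/shifted back down as emitted above.
 */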
19946 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs()
19953 if (env->subprog_cnt <= 1) in jit_subprogs()
19956 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
19961 * need a hard reject of the program. Thus -EFAULT is in jit_subprogs()
19964 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
19967 i + insn->imm + 1); in jit_subprogs()
19968 return -EFAULT; in jit_subprogs()
19973 insn->off = subprog; in jit_subprogs()
19977 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
19979 insn->imm = 1; in jit_subprogs()
19999 err = -ENOMEM; in jit_subprogs()
20000 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
20004 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20006 subprog_end = env->subprog_info[i + 1].start; in jit_subprogs()
20008 len = subprog_end - subprog_start; in jit_subprogs()
20012 * func[i]->stats will never be accessed and stays NULL in jit_subprogs()
20017 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], in jit_subprogs()
20019 func[i]->type = prog->type; in jit_subprogs()
20020 func[i]->len = len; in jit_subprogs()
20023 func[i]->is_func = 1; in jit_subprogs()
20024 func[i]->sleepable = prog->sleepable; in jit_subprogs()
20025 func[i]->aux->func_idx = i; in jit_subprogs()
20026 /* Below members are shared with the main prog and are freed only via prog->aux */ in jit_subprogs()
20027 func[i]->aux->btf = prog->aux->btf; in jit_subprogs()
20028 func[i]->aux->func_info = prog->aux->func_info; in jit_subprogs()
20029 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; in jit_subprogs()
20030 func[i]->aux->poke_tab = prog->aux->poke_tab; in jit_subprogs()
20031 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; in jit_subprogs()
20033 for (j = 0; j < prog->aux->size_poke_tab; j++) { in jit_subprogs()
20036 poke = &prog->aux->poke_tab[j]; in jit_subprogs()
20037 if (poke->insn_idx < subprog_end && in jit_subprogs()
20038 poke->insn_idx >= subprog_start) in jit_subprogs()
20039 poke->aux = func[i]->aux; in jit_subprogs()
20042 func[i]->aux->name[0] = 'F'; in jit_subprogs()
20043 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; in jit_subprogs()
20044 func[i]->jit_requested = 1; in jit_subprogs()
20045 func[i]->blinding_requested = prog->blinding_requested; in jit_subprogs()
20046 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; in jit_subprogs()
20047 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; in jit_subprogs()
20048 func[i]->aux->linfo = prog->aux->linfo; in jit_subprogs()
20049 func[i]->aux->nr_linfo = prog->aux->nr_linfo; in jit_subprogs()
20050 func[i]->aux->jited_linfo = prog->aux->jited_linfo; in jit_subprogs()
20051 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; in jit_subprogs()
20052 func[i]->aux->arena = prog->aux->arena; in jit_subprogs()
20054 insn = func[i]->insnsi; in jit_subprogs()
20055 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
20056 if (BPF_CLASS(insn->code) == BPF_LDX && in jit_subprogs()
20057 (BPF_MODE(insn->code) == BPF_PROBE_MEM || in jit_subprogs()
20058 BPF_MODE(insn->code) == BPF_PROBE_MEM32 || in jit_subprogs()
20059 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) in jit_subprogs()
20061 if ((BPF_CLASS(insn->code) == BPF_STX || in jit_subprogs()
20062 BPF_CLASS(insn->code) == BPF_ST) && in jit_subprogs()
20063 BPF_MODE(insn->code) == BPF_PROBE_MEM32) in jit_subprogs()
20065 if (BPF_CLASS(insn->code) == BPF_STX && in jit_subprogs()
20066 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) in jit_subprogs()
20069 func[i]->aux->num_exentries = num_exentries; in jit_subprogs()
20070 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; in jit_subprogs()
20071 func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; in jit_subprogs()
20073 func[i]->aux->exception_boundary = env->seen_exception; in jit_subprogs()
20075 if (!func[i]->jited) { in jit_subprogs()
20076 err = -ENOTSUPP; in jit_subprogs()
20086 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20087 insn = func[i]->insnsi; in jit_subprogs()
20088 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
20090 subprog = insn->off; in jit_subprogs()
20091 insn[0].imm = (u32)(long)func[subprog]->bpf_func; in jit_subprogs()
20092 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; in jit_subprogs()
20097 subprog = insn->off; in jit_subprogs()
20098 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); in jit_subprogs()
20112 func[i]->aux->func = func; in jit_subprogs()
20113 func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
20114 func[i]->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
20116 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20117 old_bpf_func = func[i]->bpf_func; in jit_subprogs()
20119 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { in jit_subprogs()
20120 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); in jit_subprogs()
20121 err = -ENOTSUPP; in jit_subprogs()
20131 for (i = 1; i < env->subprog_cnt; i++) { in jit_subprogs()
20137 for (i = 1; i < env->subprog_cnt; i++) in jit_subprogs()
20144 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
20146 insn[0].imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
20147 insn[1].imm = insn->off; in jit_subprogs()
20148 insn->off = 0; in jit_subprogs()
20153 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
20154 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
20155 insn->imm = subprog; in jit_subprogs()
20158 prog->jited = 1; in jit_subprogs()
20159 prog->bpf_func = func[0]->bpf_func; in jit_subprogs()
20160 prog->jited_len = func[0]->jited_len; in jit_subprogs()
20161 prog->aux->extable = func[0]->aux->extable; in jit_subprogs()
20162 prog->aux->num_exentries = func[0]->aux->num_exentries; in jit_subprogs()
20163 prog->aux->func = func; in jit_subprogs()
20164 prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
20165 prog->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
20166 prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; in jit_subprogs()
20167 prog->aux->exception_boundary = func[0]->aux->exception_boundary; in jit_subprogs()
20175 for (i = 0; i < prog->aux->size_poke_tab; i++) { in jit_subprogs()
20176 map_ptr = prog->aux->poke_tab[i].tail_call.map; in jit_subprogs()
20177 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); in jit_subprogs()
20183 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20186 func[i]->aux->poke_tab = NULL; in jit_subprogs()
20192 prog->jit_requested = 0; in jit_subprogs()
20193 prog->blinding_requested = 0; in jit_subprogs()
20194 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
20197 insn->off = 0; in jit_subprogs()
20198 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
20207 struct bpf_prog *prog = env->prog; in fixup_call_args()
20208 struct bpf_insn *insn = prog->insnsi; in fixup_call_args()
20214 if (env->prog->jit_requested && in fixup_call_args()
20215 !bpf_prog_is_offloaded(env->prog->aux)) { in fixup_call_args()
20219 if (err == -EFAULT) in fixup_call_args()
20224 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); in fixup_call_args()
20225 return -EINVAL; in fixup_call_args()
20227 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { in fixup_call_args()
20231 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in fixup_call_args()
20232 return -EINVAL; in fixup_call_args()
20234 for (i = 0; i < prog->len; i++, insn++) { in fixup_call_args()
20239 verbose(env, "callbacks are not allowed in non-JITed programs\n"); in fixup_call_args()
20240 return -EINVAL; in fixup_call_args()
20259 struct bpf_prog *prog = env->prog; in specialize_kfunc()
20277 seen_direct_write = env->seen_direct_write; in specialize_kfunc()
20283 /* restore env->seen_direct_write to its original value, since in specialize_kfunc()
20286 env->seen_direct_write = seen_direct_write; in specialize_kfunc()
20297 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; in __fixup_collection_insert_kfunc()
20302 insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); in __fixup_collection_insert_kfunc()
20312 if (!insn->imm) { in fixup_kfunc_call()
20314 return -EINVAL; in fixup_kfunc_call()
20319 /* insn->imm has the btf func_id. Replace it with an offset relative to in fixup_kfunc_call()
20323 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); in fixup_kfunc_call()
20326 insn->imm); in fixup_kfunc_call()
20327 return -EFAULT; in fixup_kfunc_call()
20331 insn->imm = BPF_CALL_IMM(desc->addr); in fixup_kfunc_call()
20332 if (insn->off) in fixup_kfunc_call()
20334 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || in fixup_kfunc_call()
20335 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { in fixup_kfunc_call()
20336 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
20338 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; in fixup_kfunc_call()
20340 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { in fixup_kfunc_call()
20343 return -EFAULT; in fixup_kfunc_call()
20351 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || in fixup_kfunc_call()
20352 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || in fixup_kfunc_call()
20353 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { in fixup_kfunc_call()
20354 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
20357 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { in fixup_kfunc_call()
20360 return -EFAULT; in fixup_kfunc_call()
20363 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in fixup_kfunc_call()
20367 return -EFAULT; in fixup_kfunc_call()
20374 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || in fixup_kfunc_call()
20375 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || in fixup_kfunc_call()
20376 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
20377 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
20381 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ in fixup_kfunc_call()
20382 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
20390 return -EFAULT; in fixup_kfunc_call()
20393 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, in fixup_kfunc_call()
20395 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || in fixup_kfunc_call()
20396 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { in fixup_kfunc_call()
20399 } else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) { in fixup_kfunc_call()
20400 struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) }; in fixup_kfunc_call()
20410 /* The function requires that the first instruction in 'patch' is insnsi[prog->len - 1] */
20413 struct bpf_subprog_info *info = env->subprog_info; in add_hidden_subprog()
20414 int cnt = env->subprog_cnt; in add_hidden_subprog()
20418 if (env->hidden_subprog_cnt) { in add_hidden_subprog()
20420 return -EFAULT; in add_hidden_subprog()
20424 * in bpf_patch_insn_data are no-ops. in add_hidden_subprog()
20426 prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); in add_hidden_subprog()
20428 return -ENOMEM; in add_hidden_subprog()
20429 env->prog = prog; in add_hidden_subprog()
20431 info[cnt].start = prog->len - len + 1; in add_hidden_subprog()
20432 env->subprog_cnt++; in add_hidden_subprog()
20433 env->hidden_subprog_cnt++; in add_hidden_subprog()
20437 /* Do various post-verification rewrites in a single program pass.
20442 struct bpf_prog *prog = env->prog; in do_misc_fixups()
20443 enum bpf_attach_type eatype = prog->expected_attach_type; in do_misc_fixups()
20445 struct bpf_insn *insn = prog->insnsi; in do_misc_fixups()
20447 const int insn_cnt = prog->len; in do_misc_fixups()
20450 struct bpf_insn *insn_buf = env->insn_buf; in do_misc_fixups()
20454 struct bpf_subprog_info *subprogs = env->subprog_info; in do_misc_fixups()
20458 if (env->seen_exception && !env->exception_callback_subprog) { in do_misc_fixups()
20460 env->prog->insnsi[insn_cnt - 1], in do_misc_fixups()
20468 prog = env->prog; in do_misc_fixups()
20469 insn = prog->insnsi; in do_misc_fixups()
20471 env->exception_callback_subprog = env->subprog_cnt - 1; in do_misc_fixups()
20473 mark_subprog_exc_cb(env, env->exception_callback_subprog); in do_misc_fixups()
20477 if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) { in do_misc_fixups()
20478 if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) || in do_misc_fixups()
20479 (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) { in do_misc_fixups()
20480 /* convert to a 32-bit mov that clears the upper 32 bits */ in do_misc_fixups()
20481 insn->code = BPF_ALU | BPF_MOV | BPF_X; in do_misc_fixups()
20483 insn->off = 0; in do_misc_fixups()
20484 insn->imm = 0; in do_misc_fixups()
20489 if (env->insn_aux_data[i + delta].needs_zext) in do_misc_fixups()
20490 /* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */ in do_misc_fixups()
20491 insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code); in do_misc_fixups()
20493 /* Make sdiv/smod divide-by-minus-one exceptions impossible. */ in do_misc_fixups()
20494 if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) || in do_misc_fixups()
20495 insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) || in do_misc_fixups()
20496 insn->code == (BPF_ALU | BPF_MOD | BPF_K) || in do_misc_fixups()
20497 insn->code == (BPF_ALU | BPF_DIV | BPF_K)) && in do_misc_fixups()
20498 insn->off == 1 && insn->imm == -1) { in do_misc_fixups()
20499 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
20500 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
20504 BPF_NEG | BPF_K, insn->dst_reg, in do_misc_fixups()
20508 BPF_MOV32_IMM(insn->dst_reg, 0), in do_misc_fixups()
20516 return -ENOMEM; in do_misc_fixups()
20518 delta += cnt - 1; in do_misc_fixups()
20519 env->prog = prog = new_prog; in do_misc_fixups()
20520 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20524 /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */ in do_misc_fixups()
20525 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || in do_misc_fixups()
20526 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || in do_misc_fixups()
20527 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || in do_misc_fixups()
20528 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { in do_misc_fixups()
20529 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
20530 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
20531 bool is_sdiv = isdiv && insn->off == 1; in do_misc_fixups()
20532 bool is_smod = !isdiv && insn->off == 1; in do_misc_fixups()
20535 /* [R,W]x div 0 -> 0 */ in do_misc_fixups()
20537 BPF_JNE | BPF_K, insn->src_reg, in do_misc_fixups()
20539 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), in do_misc_fixups()
20544 /* [R,W]x mod 0 -> [R,W]x */ in do_misc_fixups()
20546 BPF_JEQ | BPF_K, insn->src_reg, in do_misc_fixups()
20550 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), in do_misc_fixups()
20553 /* [R,W]x sdiv 0 -> 0 in do_misc_fixups()
20554 * LLONG_MIN sdiv -1 -> LLONG_MIN in do_misc_fixups()
20555 * INT_MIN sdiv -1 -> INT_MIN in do_misc_fixups()
20557 BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), in do_misc_fixups()
20568 BPF_MOV | BPF_K, insn->dst_reg, in do_misc_fixups()
20570 /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */ in do_misc_fixups()
20572 BPF_NEG | BPF_K, insn->dst_reg, in do_misc_fixups()
20578 /* [R,W]x mod 0 -> [R,W]x */ in do_misc_fixups()
20579 /* [R,W]x mod -1 -> 0 */ in do_misc_fixups()
20580 BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), in do_misc_fixups()
20590 BPF_MOV32_IMM(insn->dst_reg, 0), in do_misc_fixups()
20594 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), in do_misc_fixups()
20602 cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0); in do_misc_fixups()
20606 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); in do_misc_fixups()
20611 return -ENOMEM; in do_misc_fixups()
20613 delta += cnt - 1; in do_misc_fixups()
20614 env->prog = prog = new_prog; in do_misc_fixups()
20615 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
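/* Example expansion (conceptual) of the runtime guards patched in above
 * for a 32-bit unsigned division w0 /= w1:
 *   if w1 != 0 goto +2
 *   w0 = 0            // divide-by-zero yields 0 instead of faulting
 *   goto +1
 *   w0 /= w1
 * The signed variants additionally special-case a divisor of -1 so that
 * LLONG_MIN / INT_MIN cannot trap on architectures whose native divide
 * instruction would overflow.
 */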
20619 /* Make it impossible to de-reference a userspace address */ in do_misc_fixups()
20620 if (BPF_CLASS(insn->code) == BPF_LDX && in do_misc_fixups()
20621 (BPF_MODE(insn->code) == BPF_PROBE_MEM || in do_misc_fixups()
20622 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) { in do_misc_fixups()
20629 *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg); in do_misc_fixups()
20630 if (insn->off) in do_misc_fixups()
20631 *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off); in do_misc_fixups()
20636 *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0); in do_misc_fixups()
20638 cnt = patch - insn_buf; in do_misc_fixups()
20641 return -ENOMEM; in do_misc_fixups()
20643 delta += cnt - 1; in do_misc_fixups()
20644 env->prog = prog = new_prog; in do_misc_fixups()
20645 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20650 if (BPF_CLASS(insn->code) == BPF_LD && in do_misc_fixups()
20651 (BPF_MODE(insn->code) == BPF_ABS || in do_misc_fixups()
20652 BPF_MODE(insn->code) == BPF_IND)) { in do_misc_fixups()
20653 cnt = env->ops->gen_ld_abs(insn, insn_buf); in do_misc_fixups()
20656 return -EINVAL; in do_misc_fixups()
20661 return -ENOMEM; in do_misc_fixups()
20663 delta += cnt - 1; in do_misc_fixups()
20664 env->prog = prog = new_prog; in do_misc_fixups()
20665 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20670 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || in do_misc_fixups()
20671 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { in do_misc_fixups()
20678 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
20679 if (!aux->alu_state || in do_misc_fixups()
20680 aux->alu_state == BPF_ALU_NON_POINTER) in do_misc_fixups()
20683 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; in do_misc_fixups()
20684 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == in do_misc_fixups()
20686 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; in do_misc_fixups()
20688 off_reg = issrc ? insn->src_reg : insn->dst_reg; in do_misc_fixups()
20690 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
20693 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
20694 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
20702 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); in do_misc_fixups()
20703 insn->src_reg = BPF_REG_AX; in do_misc_fixups()
20705 insn->code = insn->code == code_add ? in do_misc_fixups()
20709 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
20710 cnt = patch - insn_buf; in do_misc_fixups()
20714 return -ENOMEM; in do_misc_fixups()
20716 delta += cnt - 1; in do_misc_fixups()
20717 env->prog = prog = new_prog; in do_misc_fixups()
20718 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20723 int stack_off = -stack_depth - 8; in do_misc_fixups()
20727 if (insn->off >= 0) in do_misc_fixups()
20728 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2); in do_misc_fixups()
20730 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1); in do_misc_fixups()
20737 return -ENOMEM; in do_misc_fixups()
20739 delta += cnt - 1; in do_misc_fixups()
20740 env->prog = prog = new_prog; in do_misc_fixups()
20741 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20745 if (insn->code != (BPF_JMP | BPF_CALL)) in do_misc_fixups()
20747 if (insn->src_reg == BPF_PSEUDO_CALL) in do_misc_fixups()
20749 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_misc_fixups()
20758 return -ENOMEM; in do_misc_fixups()
20760 delta += cnt - 1; in do_misc_fixups()
20761 env->prog = prog = new_prog; in do_misc_fixups()
20762 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20767 if (bpf_jit_inlines_helper_call(insn->imm)) in do_misc_fixups()
20770 if (insn->imm == BPF_FUNC_get_route_realm) in do_misc_fixups()
20771 prog->dst_needed = 1; in do_misc_fixups()
20772 if (insn->imm == BPF_FUNC_get_prandom_u32) in do_misc_fixups()
20774 if (insn->imm == BPF_FUNC_override_return) in do_misc_fixups()
20775 prog->kprobe_override = 1; in do_misc_fixups()
20776 if (insn->imm == BPF_FUNC_tail_call) { in do_misc_fixups()
20782 prog->cb_access = 1; in do_misc_fixups()
20784 prog->aux->stack_depth = MAX_BPF_STACK; in do_misc_fixups()
20785 prog->aux->max_pkt_offset = MAX_PACKET_OFF; in do_misc_fixups()
20792 insn->imm = 0; in do_misc_fixups()
20793 insn->code = BPF_JMP | BPF_TAIL_CALL; in do_misc_fixups()
20795 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
20796 if (env->bpf_capable && !prog->blinding_requested && in do_misc_fixups()
20797 prog->jit_requested && in do_misc_fixups()
20803 .tail_call.map = aux->map_ptr_state.map_ptr, in do_misc_fixups()
20814 insn->imm = ret + 1; in do_misc_fixups()
20824 * index &= array->index_mask; in do_misc_fixups()
20825 * to avoid out-of-bounds cpu speculation in do_misc_fixups()
20829 return -EINVAL; in do_misc_fixups()
20832 map_ptr = aux->map_ptr_state.map_ptr; in do_misc_fixups()
20834 map_ptr->max_entries, 2); in do_misc_fixups()
20838 map)->index_mask); in do_misc_fixups()
20843 return -ENOMEM; in do_misc_fixups()
20845 delta += cnt - 1; in do_misc_fixups()
20846 env->prog = prog = new_prog; in do_misc_fixups()
20847 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
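/* bpf_tail_call() handling: the helper call is rewritten into the internal
 * BPF_TAIL_CALL instruction, and since the callee is unknown the program is
 * conservatively assumed to use the maximum stack and packet offset.  When the
 * prog array is a known constant and the program is JITed, a poke descriptor
 * is recorded (insn->imm holds slot + 1) so the JIT can emit a direct jump;
 * for unprivileged map pointers the index is additionally bounds-checked
 * against max_entries and masked with array->index_mask, as the comment above
 * explains, to block out-of-bounds speculation.
 */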
20851 if (insn->imm == BPF_FUNC_timer_set_callback) { in do_misc_fixups()
20860 * Those that were not bpf_timer_init-ed will return -EINVAL. in do_misc_fixups()
20862 * Those that were not both bpf_timer_init-ed and in do_misc_fixups()
20863 * bpf_timer_set_callback-ed will return -EINVAL. in do_misc_fixups()
20866 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), in do_misc_fixups()
20876 return -ENOMEM; in do_misc_fixups()
20878 delta += cnt - 1; in do_misc_fixups()
20879 env->prog = prog = new_prog; in do_misc_fixups()
20880 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
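/* bpf_timer_set_callback() gets prog->aux materialized into R3 as a hidden
 * third argument (the BPF_LD_IMM64 above), letting the helper resolve the
 * callback address and keep the owning program alive while the timer is
 * armed; the comments above spell out which init/set_callback orderings
 * return -EINVAL.
 */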
20884 if (is_storage_get_function(insn->imm)) { in do_misc_fixups()
20886 env->insn_aux_data[i + delta].storage_get_func_atomic) in do_misc_fixups()
20895 return -ENOMEM; in do_misc_fixups()
20897 delta += cnt - 1; in do_misc_fixups()
20898 env->prog = prog = new_prog; in do_misc_fixups()
20899 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
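/* For the *_storage_get() helpers the verifier materializes the allocation
 * flags into R5 as a hidden argument: GFP_ATOMIC when the program (or this
 * particular call site) cannot sleep, GFP_KERNEL otherwise.
 */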
20904 if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) { in do_misc_fixups()
20914 return -ENOMEM; in do_misc_fixups()
20916 delta += cnt - 1; in do_misc_fixups()
20917 env->prog = prog = new_prog; in do_misc_fixups()
20918 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20926 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
20927 (insn->imm == BPF_FUNC_map_lookup_elem || in do_misc_fixups()
20928 insn->imm == BPF_FUNC_map_update_elem || in do_misc_fixups()
20929 insn->imm == BPF_FUNC_map_delete_elem || in do_misc_fixups()
20930 insn->imm == BPF_FUNC_map_push_elem || in do_misc_fixups()
20931 insn->imm == BPF_FUNC_map_pop_elem || in do_misc_fixups()
20932 insn->imm == BPF_FUNC_map_peek_elem || in do_misc_fixups()
20933 insn->imm == BPF_FUNC_redirect_map || in do_misc_fixups()
20934 insn->imm == BPF_FUNC_for_each_map_elem || in do_misc_fixups()
20935 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { in do_misc_fixups()
20936 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
20940 map_ptr = aux->map_ptr_state.map_ptr; in do_misc_fixups()
20941 ops = map_ptr->ops; in do_misc_fixups()
20942 if (insn->imm == BPF_FUNC_map_lookup_elem && in do_misc_fixups()
20943 ops->map_gen_lookup) { in do_misc_fixups()
20944 cnt = ops->map_gen_lookup(map_ptr, insn_buf); in do_misc_fixups()
20945 if (cnt == -EOPNOTSUPP) in do_misc_fixups()
20949 return -EINVAL; in do_misc_fixups()
20955 return -ENOMEM; in do_misc_fixups()
20957 delta += cnt - 1; in do_misc_fixups()
20958 env->prog = prog = new_prog; in do_misc_fixups()
20959 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
20963 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, in do_misc_fixups()
20965 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, in do_misc_fixups()
20967 BUILD_BUG_ON(!__same_type(ops->map_update_elem, in do_misc_fixups()
20970 BUILD_BUG_ON(!__same_type(ops->map_push_elem, in do_misc_fixups()
20973 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, in do_misc_fixups()
20975 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, in do_misc_fixups()
20977 BUILD_BUG_ON(!__same_type(ops->map_redirect, in do_misc_fixups()
20979 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, in do_misc_fixups()
20984 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, in do_misc_fixups()
20988 switch (insn->imm) { in do_misc_fixups()
20990 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); in do_misc_fixups()
20993 insn->imm = BPF_CALL_IMM(ops->map_update_elem); in do_misc_fixups()
20996 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); in do_misc_fixups()
20999 insn->imm = BPF_CALL_IMM(ops->map_push_elem); in do_misc_fixups()
21002 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); in do_misc_fixups()
21005 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); in do_misc_fixups()
21008 insn->imm = BPF_CALL_IMM(ops->map_redirect); in do_misc_fixups()
21011 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); in do_misc_fixups()
21014 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); in do_misc_fixups()
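/* Map helper inlining for JITed 64-bit programs with a constant map pointer:
 * bpf_map_lookup_elem() can be replaced by the map's ->map_gen_lookup()
 * instruction sequence, and the remaining map helpers are redirected to call
 * the map's ops function directly via BPF_CALL_IMM(), bypassing the generic
 * helper wrappers.  The BUILD_BUG_ON()s assert that the ops signatures match
 * the helper prototypes, which is what makes the direct calls legal.
 */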
21022 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
21023 insn->imm == BPF_FUNC_jiffies64) { in do_misc_fixups()
21038 return -ENOMEM; in do_misc_fixups()
21040 delta += cnt - 1; in do_misc_fixups()
21041 env->prog = prog = new_prog; in do_misc_fixups()
21042 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21048 if (insn->imm == BPF_FUNC_get_smp_processor_id && in do_misc_fixups()
21049 verifier_inlines_helper_call(env, insn->imm)) { in do_misc_fixups()
21062 return -ENOMEM; in do_misc_fixups()
21064 delta += cnt - 1; in do_misc_fixups()
21065 env->prog = prog = new_prog; in do_misc_fixups()
21066 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
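/* Two more helpers are inlined when JITing on 64-bit: bpf_jiffies64() becomes
 * a direct 64-bit load of the kernel's jiffies counter, and
 * bpf_get_smp_processor_id() is open-coded as a per-CPU read when
 * verifier_inlines_helper_call() allows it.
 */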
21072 insn->imm == BPF_FUNC_get_func_arg) { in do_misc_fixups()
21073 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
21074 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
21082 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); in do_misc_fixups()
21087 return -ENOMEM; in do_misc_fixups()
21089 delta += cnt - 1; in do_misc_fixups()
21090 env->prog = prog = new_prog; in do_misc_fixups()
21091 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21097 insn->imm == BPF_FUNC_get_func_ret) { in do_misc_fixups()
21100 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
21101 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
21109 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); in do_misc_fixups()
21115 return -ENOMEM; in do_misc_fixups()
21117 delta += cnt - 1; in do_misc_fixups()
21118 env->prog = prog = new_prog; in do_misc_fixups()
21119 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21125 insn->imm == BPF_FUNC_get_func_arg_cnt) { in do_misc_fixups()
21126 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
21127 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
21131 return -ENOMEM; in do_misc_fixups()
21133 env->prog = prog = new_prog; in do_misc_fixups()
21134 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21140 insn->imm == BPF_FUNC_get_func_ip) { in do_misc_fixups()
21141 /* Load IP address from ctx - 16 */ in do_misc_fixups()
21142 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); in do_misc_fixups()
21146 return -ENOMEM; in do_misc_fixups()
21148 env->prog = prog = new_prog; in do_misc_fixups()
21149 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
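/* For trampoline-based (fentry/fexit style) programs the context is laid out
 * by the trampoline itself, so bpf_get_func_arg(), bpf_get_func_ret(),
 * bpf_get_func_arg_cnt() and bpf_get_func_ip() are inlined as plain ctx
 * loads: the argument count sits at ctx - 8 and the traced function's IP at
 * ctx - 16, with out-of-range argument indices returning -EINVAL.
 */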
21155 prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
21156 insn->imm == BPF_FUNC_get_branch_snapshot) { in do_misc_fixups()
21169 /* if (unlikely(flags)) return -EINVAL */ in do_misc_fixups()
21174 * divide-by-3 through multiplication, followed by further in do_misc_fixups()
21175 * division by 8 through 3-bit right shift. in do_misc_fixups()
21187 /* if (entry_cnt == 0) return -ENOENT */ in do_misc_fixups()
21192 /* return -EINVAL; */ in do_misc_fixups()
21193 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); in do_misc_fixups()
21195 /* return -ENOENT; */ in do_misc_fixups()
21196 insn_buf[10] = BPF_MOV64_IMM(BPF_REG_0, -ENOENT); in do_misc_fixups()
21201 return -ENOMEM; in do_misc_fixups()
21203 delta += cnt - 1; in do_misc_fixups()
21204 env->prog = prog = new_prog; in do_misc_fixups()
21205 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21210 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
21211 insn->imm == BPF_FUNC_kptr_xchg && in do_misc_fixups()
21219 return -ENOMEM; in do_misc_fixups()
21221 delta += cnt - 1; in do_misc_fixups()
21222 env->prog = prog = new_prog; in do_misc_fixups()
21223 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21227 fn = env->ops->get_func_proto(insn->imm, env->prog); in do_misc_fixups()
21229 * programs to call them, must be real in-kernel functions in do_misc_fixups()
21231 if (!fn->func) { in do_misc_fixups()
21234 func_id_name(insn->imm), insn->imm); in do_misc_fixups()
21235 return -EFAULT; in do_misc_fixups()
21237 insn->imm = fn->func - __bpf_call_base; in do_misc_fixups()
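/* Any helper call not rewritten above is resolved here: insn->imm is changed
 * from the helper ID into the offset of the in-kernel implementation relative
 * to __bpf_call_base, which is the form the interpreter and the JITs expect
 * when emitting the actual call.
 */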
21250 env->prog->aux->stack_depth = subprogs[0].stack_depth; in do_misc_fixups()
21251 for (i = 0; i < env->subprog_cnt; i++) { in do_misc_fixups()
21259 return -EFAULT; in do_misc_fixups()
21264 -subprogs[i].stack_depth, BPF_MAX_LOOPS); in do_misc_fixups()
21266 insn_buf[1] = env->prog->insnsi[subprog_start]; in do_misc_fixups()
21270 return -ENOMEM; in do_misc_fixups()
21271 env->prog = prog = new_prog; in do_misc_fixups()
21278 WARN_ON(adjust_jmp_off(env->prog, subprog_start, 1)); in do_misc_fixups()
21282 for (i = 0; i < prog->aux->size_poke_tab; i++) { in do_misc_fixups()
21283 map_ptr = prog->aux->poke_tab[i].tail_call.map; in do_misc_fixups()
21284 if (!map_ptr->ops->map_poke_track || in do_misc_fixups()
21285 !map_ptr->ops->map_poke_untrack || in do_misc_fixups()
21286 !map_ptr->ops->map_poke_run) { in do_misc_fixups()
21288 return -EINVAL; in do_misc_fixups()
21291 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); in do_misc_fixups()
21298 sort_kfunc_descs_by_imm_off(env->prog); in do_misc_fixups()
21316 struct bpf_insn *insn_buf = env->insn_buf; in inline_bpf_loop()
21331 insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG); in inline_bpf_loop()
21354 insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6); in inline_bpf_loop()
21370 callback_start = env->subprog_info[callback_subprogno].start; in inline_bpf_loop()
21373 callback_offset = callback_start - call_insn_offset - 1; in inline_bpf_loop()
21374 new_prog->insnsi[call_insn_offset].imm = callback_offset; in inline_bpf_loop()
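/* inline_bpf_loop() open-codes a bpf_loop() call whose callback and flags are
 * known at verification time: nr_loops is checked against BPF_MAX_LOOPS
 * (-E2BIG otherwise), scratch registers spilled to extra stack hold the loop
 * counter and callback context, the callback is invoked as a direct
 * BPF-to-BPF call (its relative offset patched in just above), and the loop
 * exits as soon as the callback returns non-zero.
 */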
21381 return insn->code == (BPF_JMP | BPF_CALL) && in is_bpf_loop_call()
21382 insn->src_reg == 0 && in is_bpf_loop_call()
21383 insn->imm == BPF_FUNC_loop; in is_bpf_loop_call()
21386 /* For all sub-programs in the program (including main) check
21397 struct bpf_subprog_info *subprogs = env->subprog_info; in optimize_bpf_loop()
21399 struct bpf_insn *insn = env->prog->insnsi; in optimize_bpf_loop()
21400 int insn_cnt = env->prog->len; in optimize_bpf_loop()
21402 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
21407 &env->insn_aux_data[i + delta].loop_inline_state; in optimize_bpf_loop()
21409 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { in optimize_bpf_loop()
21415 -(stack_depth + stack_depth_extra), in optimize_bpf_loop()
21416 inline_state->callback_subprogno, in optimize_bpf_loop()
21419 return -ENOMEM; in optimize_bpf_loop()
21421 delta += cnt - 1; in optimize_bpf_loop()
21422 env->prog = new_prog; in optimize_bpf_loop()
21423 insn = new_prog->insnsi + i + delta; in optimize_bpf_loop()
21430 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
21435 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in optimize_bpf_loop()
21445 struct bpf_subprog_info *subprog = env->subprog_info; in remove_fastcall_spills_fills()
21446 struct bpf_insn_aux_data *aux = env->insn_aux_data; in remove_fastcall_spills_fills()
21447 struct bpf_insn *insn = env->prog->insnsi; in remove_fastcall_spills_fills()
21448 int insn_cnt = env->prog->len; in remove_fastcall_spills_fills()
21458 *(insn - j) = NOP; in remove_fastcall_spills_fills()
21463 if ((subprog + 1)->start == i + 1) { in remove_fastcall_spills_fills()
21464 if (modified && !subprog->keep_fastcall_stack) in remove_fastcall_spills_fills()
21465 subprog->stack_depth = -subprog->fastcall_stack_off; in remove_fastcall_spills_fills()
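/* remove_fastcall_spills_fills(): call sites verified to follow the fastcall
 * pattern get their surrounding register spills/fills turned into NOPs, and
 * once a whole subprog has been processed its stack depth is shrunk if the
 * fastcall spill area is no longer needed.
 */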
21479 sl = env->free_list; in free_states()
21481 sln = sl->next; in free_states()
21482 free_verifier_state(&sl->state, false); in free_states()
21486 env->free_list = NULL; in free_states()
21488 if (!env->explored_states) in free_states()
21492 sl = env->explored_states[i]; in free_states()
21495 sln = sl->next; in free_states()
21496 free_verifier_state(&sl->state, false); in free_states()
21500 env->explored_states[i] = NULL; in free_states()
21506 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check_common()
21512 env->prev_linfo = NULL; in do_check_common()
21513 env->pass_cnt++; in do_check_common()
21517 return -ENOMEM; in do_check_common()
21518 state->curframe = 0; in do_check_common()
21519 state->speculative = false; in do_check_common()
21520 state->branches = 1; in do_check_common()
21521 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); in do_check_common()
21522 if (!state->frame[0]) { in do_check_common()
21524 return -ENOMEM; in do_check_common()
21526 env->cur_state = state; in do_check_common()
21527 init_func_state(env, state->frame[0], in do_check_common()
21531 state->first_insn_idx = env->subprog_info[subprog].start; in do_check_common()
21532 state->last_insn_idx = -1; in do_check_common()
21534 regs = state->frame[state->curframe]->regs; in do_check_common()
21535 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { in do_check_common()
21546 state->frame[0]->in_exception_callback_fn = true; in do_check_common()
21551 if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) { in do_check_common()
21553 ret = -EINVAL; in do_check_common()
21557 for (i = BPF_REG_1; i <= sub->arg_cnt; i++) { in do_check_common()
21558 arg = &sub->args[i - BPF_REG_1]; in do_check_common()
21561 if (arg->arg_type == ARG_PTR_TO_CTX) { in do_check_common()
21562 reg->type = PTR_TO_CTX; in do_check_common()
21564 } else if (arg->arg_type == ARG_ANYTHING) { in do_check_common()
21565 reg->type = SCALAR_VALUE; in do_check_common()
21567 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in do_check_common()
21569 __mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen); in do_check_common()
21570 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in do_check_common()
21571 reg->type = PTR_TO_MEM; in do_check_common()
21572 if (arg->arg_type & PTR_MAYBE_NULL) in do_check_common()
21573 reg->type |= PTR_MAYBE_NULL; in do_check_common()
21575 reg->mem_size = arg->mem_size; in do_check_common()
21576 reg->id = ++env->id_gen; in do_check_common()
21577 } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { in do_check_common()
21578 reg->type = PTR_TO_BTF_ID; in do_check_common()
21579 if (arg->arg_type & PTR_MAYBE_NULL) in do_check_common()
21580 reg->type |= PTR_MAYBE_NULL; in do_check_common()
21581 if (arg->arg_type & PTR_UNTRUSTED) in do_check_common()
21582 reg->type |= PTR_UNTRUSTED; in do_check_common()
21583 if (arg->arg_type & PTR_TRUSTED) in do_check_common()
21584 reg->type |= PTR_TRUSTED; in do_check_common()
21586 reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */ in do_check_common()
21587 reg->btf_id = arg->btf_id; in do_check_common()
21588 reg->id = ++env->id_gen; in do_check_common()
21589 } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { in do_check_common()
21594 i - BPF_REG_1, arg->arg_type); in do_check_common()
21595 ret = -EFAULT; in do_check_common()
21604 if (env->prog->aux->func_info_aux) { in do_check_common()
21606 if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX) in do_check_common()
21607 env->prog->aux->func_info_aux[0].unreliable = true; in do_check_common()
21620 if (env->cur_state) { in do_check_common()
21621 free_verifier_state(env->cur_state, true); in do_check_common()
21622 env->cur_state = NULL; in do_check_common()
21626 bpf_vlog_reset(&env->log, 0); in do_check_common()
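/* do_check_common() builds the initial verifier state for one (sub)program:
 * frame 0 is allocated, and for a global subprog each argument register is
 * seeded from its BTF-derived type (ctx pointer, scalar, dynptr, memory plus
 * size, trusted/untrusted BTF pointer, arena pointer), while the main program
 * simply gets R1 = ctx.  do_check() then runs the symbolic execution, and the
 * exploration state and log are torn down on the way out.
 */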
21653 struct bpf_prog_aux *aux = env->prog->aux; in do_check_subprogs()
21657 if (!aux->func_info) in do_check_subprogs()
21661 if (env->exception_callback_subprog) in do_check_subprogs()
21662 subprog_aux(env, env->exception_callback_subprog)->called = true; in do_check_subprogs()
21666 for (i = 1; i < env->subprog_cnt; i++) { in do_check_subprogs()
21671 if (!sub_aux->called || sub_aux->verified) in do_check_subprogs()
21674 env->insn_idx = env->subprog_info[i].start; in do_check_subprogs()
21675 WARN_ON_ONCE(env->insn_idx == 0); in do_check_subprogs()
21679 } else if (env->log.level & BPF_LOG_LEVEL) { in do_check_subprogs()
21688 sub_aux->verified = true; in do_check_subprogs()
21705 env->insn_idx = 0; in do_check_main()
21708 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check_main()
21717 if (env->log.level & BPF_LOG_STATS) { in print_verification_stats()
21719 div_u64(env->verification_time, 1000)); in print_verification_stats()
21721 for (i = 0; i < env->subprog_cnt; i++) { in print_verification_stats()
21722 u32 depth = env->subprog_info[i].stack_depth; in print_verification_stats()
21725 if (i + 1 < env->subprog_cnt) in print_verification_stats()
21732 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, in print_verification_stats()
21733 env->max_states_per_insn, env->total_states, in print_verification_stats()
21734 env->peak_states, env->longest_mark_read_walk); in print_verification_stats()
21743 struct bpf_prog *prog = env->prog; in check_struct_ops_btf_id()
21749 if (!prog->gpl_compatible) { in check_struct_ops_btf_id()
21751 return -EINVAL; in check_struct_ops_btf_id()
21754 if (!prog->aux->attach_btf_id) in check_struct_ops_btf_id()
21755 return -ENOTSUPP; in check_struct_ops_btf_id()
21757 btf = prog->aux->attach_btf; in check_struct_ops_btf_id()
21760 env->attach_btf_mod = btf_try_get_module(btf); in check_struct_ops_btf_id()
21761 if (!env->attach_btf_mod) { in check_struct_ops_btf_id()
21764 return -ENOTSUPP; in check_struct_ops_btf_id()
21768 btf_id = prog->aux->attach_btf_id; in check_struct_ops_btf_id()
21773 return -ENOTSUPP; in check_struct_ops_btf_id()
21775 st_ops = st_ops_desc->st_ops; in check_struct_ops_btf_id()
21777 t = st_ops_desc->type; in check_struct_ops_btf_id()
21778 member_idx = prog->expected_attach_type; in check_struct_ops_btf_id()
21781 member_idx, st_ops->name); in check_struct_ops_btf_id()
21782 return -EINVAL; in check_struct_ops_btf_id()
21786 mname = btf_name_by_offset(btf, member->name_off); in check_struct_ops_btf_id()
21787 func_proto = btf_type_resolve_func_ptr(btf, member->type, in check_struct_ops_btf_id()
21791 mname, member_idx, st_ops->name); in check_struct_ops_btf_id()
21792 return -EINVAL; in check_struct_ops_btf_id()
21798 mname, st_ops->name); in check_struct_ops_btf_id()
21802 if (st_ops->check_member) { in check_struct_ops_btf_id()
21803 err = st_ops->check_member(t, member, prog); in check_struct_ops_btf_id()
21807 mname, st_ops->name); in check_struct_ops_btf_id()
21813 prog->aux->ctx_arg_info = in check_struct_ops_btf_id()
21814 st_ops_desc->arg_info[member_idx].info; in check_struct_ops_btf_id()
21815 prog->aux->ctx_arg_info_size = in check_struct_ops_btf_id()
21816 st_ops_desc->arg_info[member_idx].cnt; in check_struct_ops_btf_id()
21818 prog->aux->attach_func_proto = func_proto; in check_struct_ops_btf_id()
21819 prog->aux->attach_func_name = mname; in check_struct_ops_btf_id()
21820 env->ops = st_ops->verifier_ops; in check_struct_ops_btf_id()
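/* For struct_ops programs the attach target is a member of a struct_ops BTF
 * type: expected_attach_type is used as the member index, the member must be
 * a function pointer, and its prototype becomes attach_func_proto.  The
 * struct_ops implementation can veto the attachment via ->check_member(), and
 * verification continues with the struct_ops-specific verifier_ops.
 */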
21829 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) in check_attach_modify_return()
21832 return -EINVAL; in check_attach_modify_return()
21835 /* list of non-sleepable functions that are otherwise on
21839 /* Three functions below can be called from sleepable and non-sleepable context.
21840 * Assume non-sleepable from bpf safety point of view.
21862 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; in bpf_check_attach_target()
21863 bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING; in bpf_check_attach_target()
21867 int ret = 0, subprog = -1, i; in bpf_check_attach_target()
21877 return -EINVAL; in bpf_check_attach_target()
21879 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; in bpf_check_attach_target()
21883 return -EINVAL; in bpf_check_attach_target()
21888 return -EINVAL; in bpf_check_attach_target()
21890 tname = btf_name_by_offset(btf, t->name_off); in bpf_check_attach_target()
21893 return -EINVAL; in bpf_check_attach_target()
21896 struct bpf_prog_aux *aux = tgt_prog->aux; in bpf_check_attach_target()
21898 if (bpf_prog_is_dev_bound(prog->aux) && in bpf_check_attach_target()
21901 return -EINVAL; in bpf_check_attach_target()
21904 for (i = 0; i < aux->func_info_cnt; i++) in bpf_check_attach_target()
21905 if (aux->func_info[i].type_id == btf_id) { in bpf_check_attach_target()
21909 if (subprog == -1) { in bpf_check_attach_target()
21911 return -EINVAL; in bpf_check_attach_target()
21913 if (aux->func && aux->func[subprog]->aux->exception_cb) { in bpf_check_attach_target()
21917 return -EINVAL; in bpf_check_attach_target()
21919 conservative = aux->func_info_aux[subprog].unreliable; in bpf_check_attach_target()
21924 return -EINVAL; in bpf_check_attach_target()
21926 if (!prog->jit_requested) { in bpf_check_attach_target()
21929 return -EINVAL; in bpf_check_attach_target()
21932 if (!tgt_prog->jited) { in bpf_check_attach_target()
21934 return -EINVAL; in bpf_check_attach_target()
21937 if (aux->attach_tracing_prog) { in bpf_check_attach_target()
21944 return -EINVAL; in bpf_check_attach_target()
21946 } else if (tgt_prog->type == prog->type) { in bpf_check_attach_target()
21948 * To avoid potential call chain cycles, prevent attaching of a in bpf_check_attach_target()
21953 return -EINVAL; in bpf_check_attach_target()
21955 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && in bpf_check_attach_target()
21957 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || in bpf_check_attach_target()
21958 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { in bpf_check_attach_target()
21969 * long call chain fentry->extension->fentry->extension in bpf_check_attach_target()
21974 return -EINVAL; in bpf_check_attach_target()
21979 return -EINVAL; in bpf_check_attach_target()
21983 switch (prog->expected_attach_type) { in bpf_check_attach_target()
21988 return -EINVAL; in bpf_check_attach_target()
21993 return -EINVAL; in bpf_check_attach_target()
21995 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { in bpf_check_attach_target()
21998 return -EINVAL; in bpf_check_attach_target()
22000 tname += sizeof(prefix) - 1; in bpf_check_attach_target()
22007 return -EINVAL; in bpf_check_attach_target()
22008 fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL, in bpf_check_attach_target()
22018 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22021 return -EINVAL; in bpf_check_attach_target()
22026 return -EINVAL; in bpf_check_attach_target()
22029 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22032 return -EINVAL; in bpf_check_attach_target()
22039 return -EINVAL; in bpf_check_attach_target()
22041 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22043 return -EINVAL; in bpf_check_attach_target()
22044 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
22050 return -EINVAL; in bpf_check_attach_target()
22060 return -EINVAL; in bpf_check_attach_target()
22064 return -EINVAL; in bpf_check_attach_target()
22065 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22067 return -EINVAL; in bpf_check_attach_target()
22069 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && in bpf_check_attach_target()
22070 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || in bpf_check_attach_target()
22071 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) in bpf_check_attach_target()
22072 return -EINVAL; in bpf_check_attach_target()
22077 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
22083 addr = (long) tgt_prog->bpf_func; in bpf_check_attach_target()
22085 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; in bpf_check_attach_target()
22101 return -ENOENT; in bpf_check_attach_target()
22105 if (prog->sleepable) { in bpf_check_attach_target()
22106 ret = -EINVAL; in bpf_check_attach_target()
22107 switch (prog->type) { in bpf_check_attach_target()
22142 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { in bpf_check_attach_target()
22146 return -EINVAL; in bpf_check_attach_target()
22148 ret = -EINVAL; in bpf_check_attach_target()
22161 tgt_info->tgt_addr = addr; in bpf_check_attach_target()
22162 tgt_info->tgt_name = tname; in bpf_check_attach_target()
22163 tgt_info->tgt_type = t; in bpf_check_attach_target()
22164 tgt_info->tgt_mod = mod; in bpf_check_attach_target()
22189 if (prog->type == BPF_PROG_TYPE_TRACING) { in BTF_SET_START()
22190 switch (prog->expected_attach_type) { in BTF_SET_START()
22200 return prog->type == BPF_PROG_TYPE_LSM || in BTF_SET_START()
22201 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || in BTF_SET_START()
22202 prog->type == BPF_PROG_TYPE_STRUCT_OPS; in BTF_SET_START()
22207 struct bpf_prog *prog = env->prog; in check_attach_btf_id()
22208 struct bpf_prog *tgt_prog = prog->aux->dst_prog; in check_attach_btf_id()
22210 u32 btf_id = prog->aux->attach_btf_id; in check_attach_btf_id()
22215 if (prog->type == BPF_PROG_TYPE_SYSCALL) { in check_attach_btf_id()
22216 if (prog->sleepable) in check_attach_btf_id()
22220 return -EINVAL; in check_attach_btf_id()
22223 if (prog->sleepable && !can_be_sleepable(prog)) { in check_attach_btf_id()
22225 return -EINVAL; in check_attach_btf_id()
22228 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) in check_attach_btf_id()
22231 if (prog->type != BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
22232 prog->type != BPF_PROG_TYPE_LSM && in check_attach_btf_id()
22233 prog->type != BPF_PROG_TYPE_EXT) in check_attach_btf_id()
22236 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); in check_attach_btf_id()
22240 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { in check_attach_btf_id()
22242 * inherit env->ops and expected_attach_type for the rest of the in check_attach_btf_id()
22245 env->ops = bpf_verifier_ops[tgt_prog->type]; in check_attach_btf_id()
22246 prog->expected_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
22250 prog->aux->attach_func_proto = tgt_info.tgt_type; in check_attach_btf_id()
22251 prog->aux->attach_func_name = tgt_info.tgt_name; in check_attach_btf_id()
22252 prog->aux->mod = tgt_info.tgt_mod; in check_attach_btf_id()
22255 prog->aux->saved_dst_prog_type = tgt_prog->type; in check_attach_btf_id()
22256 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
22259 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { in check_attach_btf_id()
22260 prog->aux->attach_btf_trace = true; in check_attach_btf_id()
22262 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { in check_attach_btf_id()
22264 return -EINVAL; in check_attach_btf_id()
22268 if (prog->type == BPF_PROG_TYPE_LSM) { in check_attach_btf_id()
22269 ret = bpf_lsm_verify_prog(&env->log, prog); in check_attach_btf_id()
22272 } else if (prog->type == BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
22274 return -EINVAL; in check_attach_btf_id()
22277 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); in check_attach_btf_id()
22280 return -ENOMEM; in check_attach_btf_id()
22282 if (tgt_prog && tgt_prog->aux->tail_call_reachable) in check_attach_btf_id()
22283 tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; in check_attach_btf_id()
22285 prog->aux->dst_trampoline = tr; in check_attach_btf_id()
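/* check_attach_btf_id() finishes attach-target resolution for tracing, LSM
 * and EXT programs: bpf_check_attach_target() validates the target and
 * returns its address/type/name, which are cached in prog->aux, and a
 * trampoline keyed by (target prog, attach btf, btf_id) is allocated so the
 * program can later be attached through it.
 */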
22304 int i, len, ret = -EINVAL, err; in bpf_check()
22310 return -EINVAL; in bpf_check()
22317 return -ENOMEM; in bpf_check()
22319 env->bt.env = env; in bpf_check()
22321 len = (*prog)->len; in bpf_check()
22322 env->insn_aux_data = in bpf_check()
22324 ret = -ENOMEM; in bpf_check()
22325 if (!env->insn_aux_data) in bpf_check()
22328 env->insn_aux_data[i].orig_idx = i; in bpf_check()
22329 env->prog = *prog; in bpf_check()
22330 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
22331 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); in bpf_check()
22333 env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token); in bpf_check()
22334 env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token); in bpf_check()
22335 env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token); in bpf_check()
22336 env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token); in bpf_check()
22337 env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF); in bpf_check()
22348 ret = bpf_vlog_init(&env->log, attr->log_level, in bpf_check()
22349 (char __user *) (unsigned long) attr->log_buf, in bpf_check()
22350 attr->log_size); in bpf_check()
22358 verbose(env, "in-kernel BTF is malformed\n"); in bpf_check()
22363 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); in bpf_check()
22365 env->strict_alignment = true; in bpf_check()
22366 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) in bpf_check()
22367 env->strict_alignment = false; in bpf_check()
22370 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; in bpf_check()
22371 env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS; in bpf_check()
22373 env->explored_states = kvcalloc(state_htab_size(env), in bpf_check()
22376 ret = -ENOMEM; in bpf_check()
22377 if (!env->explored_states) in bpf_check()
22404 if (bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
22405 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
22421 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) in bpf_check()
22425 kvfree(env->explored_states); in bpf_check()
22459 /* do 32-bit optimization after insn patching has done so those patched in bpf_check()
22462 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
22464 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
22471 env->verification_time = ktime_get_ns() - start_time; in bpf_check()
22473 env->prog->aux->verified_insns = env->insn_processed; in bpf_check()
22476 err = bpf_vlog_finalize(&env->log, &log_true_size); in bpf_check()
22483 ret = -EFAULT; in bpf_check()
22490 if (env->used_map_cnt) { in bpf_check()
22492 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
22493 sizeof(env->used_maps[0]), in bpf_check()
22496 if (!env->prog->aux->used_maps) { in bpf_check()
22497 ret = -ENOMEM; in bpf_check()
22501 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
22502 sizeof(env->used_maps[0]) * env->used_map_cnt); in bpf_check()
22503 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
22505 if (env->used_btf_cnt) { in bpf_check()
22507 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, in bpf_check()
22508 sizeof(env->used_btfs[0]), in bpf_check()
22510 if (!env->prog->aux->used_btfs) { in bpf_check()
22511 ret = -ENOMEM; in bpf_check()
22515 memcpy(env->prog->aux->used_btfs, env->used_btfs, in bpf_check()
22516 sizeof(env->used_btfs[0]) * env->used_btf_cnt); in bpf_check()
22517 env->prog->aux->used_btf_cnt = env->used_btf_cnt; in bpf_check()
22519 if (env->used_map_cnt || env->used_btf_cnt) { in bpf_check()
22529 if (!env->prog->aux->used_maps) in bpf_check()
22534 if (!env->prog->aux->used_btfs) in bpf_check()
22540 if (env->prog->type == BPF_PROG_TYPE_EXT) in bpf_check()
22541 env->prog->expected_attach_type = 0; in bpf_check()
22543 *prog = env->prog; in bpf_check()
22545 module_put(env->attach_btf_mod); in bpf_check()
22549 vfree(env->insn_aux_data); in bpf_check()