Lines Matching +full:tie +full:- +full:off

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
19 if (!!log->ubuf != !!log->len_total) in bpf_verifier_log_attr_valid()
22 if (log->ubuf && log->level == 0) in bpf_verifier_log_attr_valid()
24 if (log->level & ~BPF_LOG_MASK) in bpf_verifier_log_attr_valid()
26 if (log->len_total > UINT_MAX >> 2) in bpf_verifier_log_attr_valid()
34 log->level = log_level; in bpf_vlog_init()
35 log->ubuf = log_buf; in bpf_vlog_init()
36 log->len_total = log_size; in bpf_vlog_init()
40 return -EINVAL; in bpf_vlog_init()
48 u64 len = log->end_pos + add_len; in bpf_vlog_update_len_max()
50 /* log->len_max could be larger than our current len due to in bpf_vlog_update_len_max()
55 log->len_max = UINT_MAX; in bpf_vlog_update_len_max()
56 else if (len > log->len_max) in bpf_vlog_update_len_max()
57 log->len_max = len; in bpf_vlog_update_len_max()
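
The fragment above tracks a high-water mark for how much log space would have been needed. A minimal userspace sketch of the same idea, with made-up demo_* names (not kernel code): the candidate length is computed in 64 bits and saturated at UINT_MAX before it is folded into the 32-bit maximum, so the sum cannot wrap.

#include <stdint.h>
#include <limits.h>

struct demo_log {
	uint64_t end_pos;	/* logical write position */
	uint32_t len_max;	/* largest length ever needed */
};

static void demo_update_len_max(struct demo_log *log, uint32_t add_len)
{
	uint64_t len = log->end_pos + add_len;	/* 64-bit sum, cannot overflow */

	if (len > UINT_MAX)
		log->len_max = UINT_MAX;	/* saturate at what the u32 field can express */
	else if (len > log->len_max)
		log->len_max = (uint32_t)len;	/* new high-water mark */
}
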
66 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); in bpf_verifier_vlog()
68 if (log->level == BPF_LOG_KERNEL) { in bpf_verifier_vlog()
69 bool newline = n > 0 && log->kbuf[n - 1] == '\n'; in bpf_verifier_vlog()
71 pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n"); in bpf_verifier_vlog()
78 if (log->level & BPF_LOG_FIXED) { in bpf_verifier_vlog()
81 if (log->end_pos < log->len_total) { in bpf_verifier_vlog()
82 new_n = min_t(u32, log->len_total - log->end_pos, n); in bpf_verifier_vlog()
83 log->kbuf[new_n - 1] = '\0'; in bpf_verifier_vlog()
86 cur_pos = log->end_pos; in bpf_verifier_vlog()
87 log->end_pos += n - 1; /* don't count terminating '\0' */ in bpf_verifier_vlog()
89 if (log->ubuf && new_n && in bpf_verifier_vlog()
90 copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n)) in bpf_verifier_vlog()
96 new_end = log->end_pos + n; in bpf_verifier_vlog()
97 if (new_end - log->start_pos >= log->len_total) in bpf_verifier_vlog()
98 new_start = new_end - log->len_total; in bpf_verifier_vlog()
100 new_start = log->start_pos; in bpf_verifier_vlog()
102 log->start_pos = new_start; in bpf_verifier_vlog()
103 log->end_pos = new_end - 1; /* don't count terminating '\0' */ in bpf_verifier_vlog()
105 if (!log->ubuf) in bpf_verifier_vlog()
108 new_n = min(n, log->len_total); in bpf_verifier_vlog()
109 cur_pos = new_end - new_n; in bpf_verifier_vlog()
110 div_u64_rem(cur_pos, log->len_total, &buf_start); in bpf_verifier_vlog()
111 div_u64_rem(new_end, log->len_total, &buf_end); in bpf_verifier_vlog()
117 buf_end = log->len_total; in bpf_verifier_vlog()
127 if (copy_to_user(log->ubuf + buf_start, in bpf_verifier_vlog()
128 log->kbuf + n - new_n, in bpf_verifier_vlog()
129 buf_end - buf_start)) in bpf_verifier_vlog()
133 if (copy_to_user(log->ubuf + buf_start, in bpf_verifier_vlog()
134 log->kbuf + n - new_n, in bpf_verifier_vlog()
135 log->len_total - buf_start)) in bpf_verifier_vlog()
137 if (copy_to_user(log->ubuf, in bpf_verifier_vlog()
138 log->kbuf + n - buf_end, in bpf_verifier_vlog()
146 log->ubuf = NULL; in bpf_verifier_vlog()
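
The rotating branch above keeps only the newest len_total bytes: logical positions grow without bound, physical offsets are the logical positions modulo the buffer size, and a write that crosses the physical end is split into two copies. A userspace sketch of that mapping, with hypothetical demo_* names and the NUL-terminator bookkeeping left out:

#include <stdint.h>
#include <string.h>

struct demo_rlog {
	char *buf;		/* physical buffer of len_total bytes */
	uint32_t len_total;
	uint64_t start_pos;	/* oldest retained logical byte */
	uint64_t end_pos;	/* one past the newest logical byte */
};

static void demo_rlog_append(struct demo_rlog *log, const char *data, uint32_t n)
{
	uint64_t new_end;
	uint32_t new_n, buf_start, buf_end;

	if (!n || !log->len_total)
		return;

	new_end = log->end_pos + n;
	new_n = n < log->len_total ? n : log->len_total;	/* at most one buffer's worth survives */

	if (new_end - log->start_pos >= log->len_total)
		log->start_pos = new_end - log->len_total;	/* oldest bytes fall off */
	log->end_pos = new_end;

	buf_start = (uint32_t)((new_end - new_n) % log->len_total);
	buf_end = (uint32_t)(new_end % log->len_total);
	if (buf_end == 0)
		buf_end = log->len_total;

	if (buf_start < buf_end) {
		/* destination is one contiguous region */
		memcpy(log->buf + buf_start, data + (n - new_n), new_n);
	} else {
		/* destination wraps: copy the tail of the buffer, then its head */
		uint32_t tail = log->len_total - buf_start;

		memcpy(log->buf + buf_start, data + (n - new_n), tail);
		memcpy(log->buf, data + (n - new_n) + tail, buf_end);
	}
}

In the kernel the destination is a user buffer, so the one or two copies visible in the fragment go through copy_to_user(), and a failed copy drops log->ubuf.
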
154 if (WARN_ON_ONCE(new_pos > log->end_pos)) in bpf_vlog_reset()
157 if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL) in bpf_vlog_reset()
164 log->end_pos = new_pos; in bpf_vlog_reset()
165 if (log->end_pos < log->start_pos) in bpf_vlog_reset()
166 log->start_pos = log->end_pos; in bpf_vlog_reset()
168 if (!log->ubuf) in bpf_vlog_reset()
171 if (log->level & BPF_LOG_FIXED) in bpf_vlog_reset()
172 pos = log->end_pos + 1; in bpf_vlog_reset()
174 div_u64_rem(new_pos, log->len_total, &pos); in bpf_vlog_reset()
176 if (pos < log->len_total && put_user(zero, log->ubuf + pos)) in bpf_vlog_reset()
177 log->ubuf = NULL; in bpf_vlog_reset()
184 for (i = 0, j = len - 1; i < j; i++, j--) in bpf_vlog_reverse_kbuf()
190 /* we split log->kbuf into two equal parts for both ends of array */ in bpf_vlog_reverse_ubuf()
191 int n = sizeof(log->kbuf) / 2, nn; in bpf_vlog_reverse_ubuf()
192 char *lbuf = log->kbuf, *rbuf = log->kbuf + n; in bpf_vlog_reverse_ubuf()
200 while (end - start > 1) { in bpf_vlog_reverse_ubuf()
201 nn = min(n, (end - start) / 2); in bpf_vlog_reverse_ubuf()
203 if (copy_from_user(lbuf, log->ubuf + start, nn)) in bpf_vlog_reverse_ubuf()
204 return -EFAULT; in bpf_vlog_reverse_ubuf()
205 if (copy_from_user(rbuf, log->ubuf + end - nn, nn)) in bpf_vlog_reverse_ubuf()
206 return -EFAULT; in bpf_vlog_reverse_ubuf()
214 if (copy_to_user(log->ubuf + start, rbuf, nn)) in bpf_vlog_reverse_ubuf()
215 return -EFAULT; in bpf_vlog_reverse_ubuf()
216 if (copy_to_user(log->ubuf + end - nn, lbuf, nn)) in bpf_vlog_reverse_ubuf()
217 return -EFAULT; in bpf_vlog_reverse_ubuf()
220 end -= nn; in bpf_vlog_reverse_ubuf()
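
bpf_vlog_reverse_ubuf() reverses a range of the user buffer while only ever holding a small bounce buffer: each round pulls up to n bytes from both ends, reverses each chunk, and writes the chunks back to the opposite ends. A self-contained sketch of the same two-pointer scheme on plain memory (demo_* names are hypothetical; the kernel version goes through copy_from_user()/copy_to_user() and can fail):

#include <string.h>

static void demo_reverse_chunk(char *buf, int len)
{
	for (int i = 0, j = len - 1; i < j; i++, j--) {
		char tmp = buf[i];
		buf[i] = buf[j];
		buf[j] = tmp;
	}
}

/* Reverse data[start, end) using a scratch buffer split into two halves. */
static void demo_reverse_range(char *data, int start, int end,
			       char *scratch, int scratch_sz)
{
	int n = scratch_sz / 2, nn;
	char *lbuf = scratch, *rbuf = scratch + n;

	if (n < 1)
		return;	/* need at least one byte per half */

	while (end - start > 1) {
		nn = n < (end - start) / 2 ? n : (end - start) / 2;

		memcpy(lbuf, data + start, nn);
		memcpy(rbuf, data + end - nn, nn);

		demo_reverse_chunk(lbuf, nn);
		demo_reverse_chunk(rbuf, nn);

		/* swapped: the right chunk becomes the new left end and vice versa */
		memcpy(data + start, rbuf, nn);
		memcpy(data + end - nn, lbuf, nn);

		start += nn;
		end -= nn;
	}
}
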
232 if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL) in bpf_vlog_finalize()
235 if (!log->ubuf) in bpf_vlog_finalize()
238 if (log->start_pos == 0) in bpf_vlog_finalize()
242 * buffer beginning and be a continuous zero-terminated string. Note in bpf_vlog_finalize()
243 * that if log->start_pos != 0 then we definitely filled up entire log in bpf_vlog_finalize()
245 * the left by (log->start_pos % log->len_total) bytes. in bpf_vlog_finalize()
250 * do in-place array rotation. This is a leetcode-style problem, which in bpf_vlog_finalize()
264 * We'll utilize log->kbuf to read user memory chunk by chunk, swap in bpf_vlog_finalize()
265 * bytes, and write them back. Doing it byte-by-byte would be in bpf_vlog_finalize()
271 /* length of the chopped off part that will be the beginning; in bpf_vlog_finalize()
274 div_u64_rem(log->start_pos, log->len_total, &sublen); in bpf_vlog_finalize()
275 sublen = log->len_total - sublen; in bpf_vlog_finalize()
277 err = bpf_vlog_reverse_ubuf(log, 0, log->len_total); in bpf_vlog_finalize()
279 err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total); in bpf_vlog_finalize()
281 log->ubuf = NULL; in bpf_vlog_finalize()
284 *log_size_actual = log->len_max; in bpf_vlog_finalize()
290 if (!!log->ubuf != !!log->len_total) in bpf_vlog_finalize()
291 return -EFAULT; in bpf_vlog_finalize()
294 if (log->ubuf && log->len_max > log->len_total) in bpf_vlog_finalize()
295 return -ENOSPC; in bpf_vlog_finalize()
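
The comments above describe the finalize step for a wrapped rotating log: the newest data sits split across the physical buffer, so it is rotated into place with the classic three-reversal trick instead of with extra memory. A sketch of that trick on plain memory (demo_* names are made up; the exact order of the reversal calls in the kernel may differ, and there each per-range reversal is the chunked ubuf helper shown earlier):

static void demo_reverse(char *buf, int len)
{
	for (int i = 0, j = len - 1; i < j; i++, j--) {
		char tmp = buf[i];
		buf[i] = buf[j];
		buf[j] = tmp;
	}
}

/* Rotate buf[0, len) left by k positions, in place, O(len) time, O(1) space.
 * Assumes len > 0 and 0 <= k.
 */
static void demo_rotate_left(char *buf, int len, int k)
{
	k %= len;
	demo_reverse(buf, len);		/* reverse the whole buffer	*/
	demo_reverse(buf, len - k);	/* re-reverse the new prefix	*/
	demo_reverse(buf + len - k, k);	/* re-reverse the new suffix	*/
}

For example, rotating "DEFGABC" left by 4 yields "ABCDEFG", which is the "make the log a continuous zero-terminated string starting at the buffer beginning" effect the comment describes.
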
309 if (!bpf_verifier_log_needed(&env->log)) in bpf_verifier_log_write()
313 bpf_verifier_vlog(&env->log, fmt, args); in bpf_verifier_log_write()
340 prog = env->prog; in find_linfo()
341 nr_linfo = prog->aux->nr_linfo; in find_linfo()
343 if (!nr_linfo || insn_off >= prog->len) in find_linfo()
346 linfo = prog->aux->linfo; in find_linfo()
354 r = nr_linfo - 1; in find_linfo()
356 /* (r - l + 1) / 2 means we break a tie to the right, so if: in find_linfo()
360 * If the tie was broken to the left, m=1 would end us up in in find_linfo()
363 m = l + (r - l + 1) / 2; in find_linfo()
367 r = m - 1; in find_linfo()
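
find_linfo() looks for the last line_info entry whose insn_off does not exceed the target instruction, and the comment explains why the midpoint must round up ("break a tie to the right"): otherwise the l = m branch could fail to make progress when r == l + 1. A stand-alone sketch of that search over a plain sorted array (hypothetical names, not the kernel function):

/* Return the index of the last entry with offs[i] <= target, or -1. */
static int demo_find_le(const unsigned int *offs, int n, unsigned int target)
{
	int l = 0, r = n - 1, m;

	if (n == 0 || offs[0] > target)
		return -1;			/* nothing at or before target */

	while (l < r) {
		m = l + (r - l + 1) / 2;	/* round up: tie goes right, so m > l */
		if (offs[m] <= target)
			l = m;			/* m still qualifies, keep searching right */
		else
			r = m - 1;		/* m is too far, discard it */
	}
	return l;
}
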
389 if (!bpf_verifier_log_needed(&env->log)) in verbose_linfo()
392 prev_linfo = env->prev_linfo; in verbose_linfo()
405 if (prev_linfo && linfo->file_name_off == prev_linfo->file_name_off && in verbose_linfo()
406 BPF_LINE_INFO_LINE_NUM(linfo->line_col) == BPF_LINE_INFO_LINE_NUM(prev_linfo->line_col)) in verbose_linfo()
413 bpf_verifier_vlog(&env->log, prefix_fmt, args); in verbose_linfo()
417 btf = env->prog->aux->btf; in verbose_linfo()
418 s = ltrim(btf_name_by_offset(btf, linfo->line_off)); in verbose_linfo()
421 s = btf_name_by_offset(btf, linfo->file_name_off); in verbose_linfo()
425 verbose(env, " @ %s:%u\n", fname, BPF_LINE_INFO_LINE_NUM(linfo->line_col)); in verbose_linfo()
427 env->prev_linfo = linfo; in verbose_linfo()
432 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); in btf_type_name()
485 snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s", in reg_type_str()
487 return env->tmp_str_buf; in reg_type_str()
515 return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1; in iter_type_str()
602 /* For signed ranges, we want to unify 64-bit and 32-bit values in the in print_scalar_ranges()
605 * because negative 64-bit and 32-bit values >= -S32_MIN have the same in print_scalar_ranges()
609 * E.g., smin=-2 and smin32=-2 are exactly the same in decimal, but in in print_scalar_ranges()
619 {"smin", reg->smin_value, reg->smin_value == S64_MIN}, in print_scalar_ranges()
620 {"smax", reg->smax_value, reg->smax_value == S64_MAX}, in print_scalar_ranges()
621 {"umin", reg->umin_value, reg->umin_value == 0}, in print_scalar_ranges()
622 {"umax", reg->umax_value, reg->umax_value == U64_MAX}, in print_scalar_ranges()
624 is_snum_decimal((s64)reg->s32_min_value) in print_scalar_ranges()
625 ? (s64)reg->s32_min_value in print_scalar_ranges()
626 : (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN}, in print_scalar_ranges()
628 is_snum_decimal((s64)reg->s32_max_value) in print_scalar_ranges()
629 ? (s64)reg->s32_max_value in print_scalar_ranges()
630 : (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX}, in print_scalar_ranges()
631 {"umin32", reg->u32_min_value, reg->u32_min_value == 0}, in print_scalar_ranges()
632 {"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX}, in print_scalar_ranges()
637 if (m1->omit) in print_scalar_ranges()
640 neg1 = m1->name[0] == 's' && (s64)m1->val < 0; in print_scalar_ranges()
642 verbose(env, "%s%s=", *sep, m1->name); in print_scalar_ranges()
646 if (m2->omit || m2->val != m1->val) in print_scalar_ranges()
649 neg2 = m2->name[0] == 's' && (s64)m2->val < 0; in print_scalar_ranges()
652 m2->omit = true; in print_scalar_ranges()
653 verbose(env, "%s=", m2->name); in print_scalar_ranges()
656 if (m1->name[0] == 's') in print_scalar_ranges()
657 verbose_snum(env, m1->val); in print_scalar_ranges()
659 verbose_unum(env, m1->val); in print_scalar_ranges()
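
print_scalar_ranges() walks a small table of named bounds, skips the ones still at their trivial default (the omit flag), and coalesces entries that share a value into a single "a=b=val" group. A simplified userspace sketch of that skip-and-coalesce loop (demo_* names are invented; the kernel additionally checks that two bounds would render identically, signed vs. unsigned, before merging, and chooses decimal or hex per value):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct demo_range {
	const char *name;
	int64_t val;
	bool omit;	/* true while the bound still holds its default */
};

static void demo_print_ranges(struct demo_range *r, int n)
{
	const char *sep = "";

	for (int i = 0; i < n; i++) {
		if (r[i].omit)
			continue;

		printf("%s%s=", sep, r[i].name);
		sep = ",";

		/* fold later entries with the same value into this group */
		for (int j = i + 1; j < n; j++) {
			if (r[j].omit || r[j].val != r[i].val)
				continue;
			r[j].omit = true;
			printf("%s=", r[j].name);
		}
		printf("%lld", (long long)r[i].val);
	}
	printf("\n");
}

With bounds {smin=5, smax=5, umin=0 (omitted), umax=5}, this prints "smin=smax=umax=5", mirroring the compact range output the verifier log produces.
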
687 t = reg->type; in print_reg_state()
688 if (t == SCALAR_VALUE && reg->precise) in print_reg_state()
690 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) { in print_reg_state()
691 verbose_snum(env, reg->var_off.value); in print_reg_state()
699 if (state->frameno != reg->frameno) in print_reg_state()
700 verbose(env, "[%d]", reg->frameno); in print_reg_state()
701 if (tnum_is_const(reg->var_off)) { in print_reg_state()
702 verbose_snum(env, reg->var_off.value + reg->off); in print_reg_state()
707 verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id)); in print_reg_state()
709 if (reg->id) in print_reg_state()
710 verbose_a("id=%d", reg->id & ~BPF_ADD_CONST); in print_reg_state()
711 if (reg->id & BPF_ADD_CONST) in print_reg_state()
712 verbose(env, "%+d", reg->off); in print_reg_state()
713 if (reg->ref_obj_id) in print_reg_state()
714 verbose_a("ref_obj_id=%d", reg->ref_obj_id); in print_reg_state()
715 if (type_is_non_owning_ref(reg->type)) in print_reg_state()
718 if (reg->map_ptr->name[0]) in print_reg_state()
719 verbose_a("map=%s", reg->map_ptr->name); in print_reg_state()
721 reg->map_ptr->key_size, in print_reg_state()
722 reg->map_ptr->value_size); in print_reg_state()
724 if (t != SCALAR_VALUE && reg->off) { in print_reg_state()
725 verbose_a("off="); in print_reg_state()
726 verbose_snum(env, reg->off); in print_reg_state()
730 verbose_unum(env, reg->range); in print_reg_state()
734 verbose_unum(env, reg->mem_size); in print_reg_state()
737 verbose_a("type=%s", dynptr_type_str(reg->dynptr.type)); in print_reg_state()
738 if (tnum_is_const(reg->var_off)) { in print_reg_state()
740 if (reg->var_off.value) { in print_reg_state()
742 verbose_snum(env, reg->var_off.value); in print_reg_state()
746 if (!tnum_is_unknown(reg->var_off)) { in print_reg_state()
749 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in print_reg_state()
762 if (state->frameno) in print_verifier_state()
763 verbose(env, " frame%d:", state->frameno); in print_verifier_state()
765 reg = &state->regs[i]; in print_verifier_state()
766 if (reg->type == NOT_INIT) in print_verifier_state()
771 print_liveness(env, reg->live); in print_verifier_state()
775 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
786 slot_type = state->stack[i].slot_type[j]; in print_verifier_state()
795 reg = &state->stack[i].spilled_ptr; in print_verifier_state()
796 switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) { in print_verifier_state()
800 if (state->stack[i].slot_type[j] == STACK_SPILL) in print_verifier_state()
804 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
805 print_liveness(env, reg->live); in print_verifier_state()
811 i += BPF_DYNPTR_NR_SLOTS - 1; in print_verifier_state()
812 reg = &state->stack[i].spilled_ptr; in print_verifier_state()
814 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
815 print_liveness(env, reg->live); in print_verifier_state()
816 verbose(env, "=dynptr_%s(", dynptr_type_str(reg->dynptr.type)); in print_verifier_state()
817 if (reg->id) in print_verifier_state()
818 verbose_a("id=%d", reg->id); in print_verifier_state()
819 if (reg->ref_obj_id) in print_verifier_state()
820 verbose_a("ref_id=%d", reg->ref_obj_id); in print_verifier_state()
821 if (reg->dynptr_id) in print_verifier_state()
822 verbose_a("dynptr_id=%d", reg->dynptr_id); in print_verifier_state()
827 if (!reg->ref_obj_id) in print_verifier_state()
830 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
831 print_liveness(env, reg->live); in print_verifier_state()
833 iter_type_str(reg->iter.btf, reg->iter.btf_id), in print_verifier_state()
834 reg->ref_obj_id, iter_state_str(reg->iter.state), in print_verifier_state()
835 reg->iter.depth); in print_verifier_state()
840 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
841 print_liveness(env, reg->live); in print_verifier_state()
846 if (state->acquired_refs && state->refs[0].id) { in print_verifier_state()
847 verbose(env, " refs=%d", state->refs[0].id); in print_verifier_state()
848 for (i = 1; i < state->acquired_refs; i++) in print_verifier_state()
849 if (state->refs[i].id) in print_verifier_state()
850 verbose(env, ",%d", state->refs[i].id); in print_verifier_state()
852 if (state->in_callback_fn) in print_verifier_state()
854 if (state->in_async_callback_fn) in print_verifier_state()
864 BPF_LOG_MIN_ALIGNMENT) - pos - 1; in vlog_alignment()
869 if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) { in print_insn_state()
871 bpf_vlog_reset(&env->log, env->prev_log_pos - 1); in print_insn_state()
872 verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' '); in print_insn_state()
874 verbose(env, "%d:", env->insn_idx); in print_insn_state()