Lines Matching +full:async +full:- +full:enum

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
6 #include <linux/bpf-cgroup.h>
30 * inside its own verifier_ops->get_func_proto() callback it should return
42 return (unsigned long) map->ops->map_lookup_elem(map, key); in BPF_CALL_2()
59 return map->ops->map_update_elem(map, key, value, flags); in BPF_CALL_4()
77 return map->ops->map_delete_elem(map, key); in BPF_CALL_2()
91 return map->ops->map_push_elem(map, value, flags); in BPF_CALL_3()
106 return map->ops->map_pop_elem(map, value); in BPF_CALL_2()
119 return map->ops->map_peek_elem(map, value); in BPF_CALL_2()
133 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); in BPF_CALL_3()
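
The map helpers matched at lines 42-133 are thin wrappers that delegate to the map's ops table. A minimal BPF-program-side sketch of the two most common calls; the map name, key/value layout and attach point are illustrative assumptions, not taken from the file:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, u32);
	__type(value, u64);
} counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int count_writes(void *ctx)
{
	u32 key = 0;
	u64 init = 1, *val;

	/* ends up in map->ops->map_lookup_elem() (line 42) */
	val = bpf_map_lookup_elem(&counts, &key);
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		/* ends up in map->ops->map_update_elem() (line 59) */
		bpf_map_update_elem(&counts, &key, &init, BPF_ANY);
	return 0;
}

char _license[] SEC("license") = "GPL";
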
227 return -EINVAL; in BPF_CALL_0()
229 return (u64) task->tgid << 32 | task->pid; in BPF_CALL_0()
245 return -EINVAL; in BPF_CALL_0()
266 strscpy_pad(buf, task->comm, size); in BPF_CALL_2()
270 return -EINVAL; in BPF_CALL_2()
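
Lines 227-229 show bpf_get_current_pid_tgid() packing (tgid << 32 | pid), and line 266 shows bpf_get_current_comm() copying task->comm. A small sketch decoding that packing; the attach point is an assumption:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_getpid")
int show_ids(void *ctx)
{
	u64 id = bpf_get_current_pid_tgid();
	u32 tgid = id >> 32;	/* thread group id, upper 32 bits (line 229) */
	u32 pid = (u32)id;	/* thread id, lower 32 bits */
	char comm[16];

	bpf_get_current_comm(comm, sizeof(comm));	/* strscpy_pad() of task->comm (line 266) */
	bpf_printk("tgid=%u pid=%u comm=%s", tgid, pid, comm);
	return 0;
}

char _license[] SEC("license") = "GPL";
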
381 lock = src + map->record->spin_lock_off; in copy_map_value_locked()
383 lock = dst + map->record->spin_lock_off; in copy_map_value_locked()
458 return -EINVAL; in __bpf_strtoull()
461 return -EINVAL; in __bpf_strtoull()
464 return -EINVAL; in __bpf_strtoull()
469 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); in __bpf_strtoull()
473 consumed = cur_buf - buf; in __bpf_strtoull()
474 cur_len -= consumed; in __bpf_strtoull()
476 return -EINVAL; in __bpf_strtoull()
478 cur_len = min(cur_len, sizeof(str) - 1); in __bpf_strtoull()
487 return -ERANGE; in __bpf_strtoull()
490 return -EINVAL; in __bpf_strtoull()
493 consumed += cur_buf - str; in __bpf_strtoull()
509 if ((long long)-_res > 0) in __bpf_strtoll()
510 return -ERANGE; in __bpf_strtoll()
511 *res = -_res; in __bpf_strtoll()
514 return -ERANGE; in __bpf_strtoll()
557 return -EINVAL; in BPF_CALL_4()
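
Lines 458-514 are the __bpf_strtoull()/__bpf_strtoll() parsing core behind the bpf_strtol()/bpf_strtoul() helpers. A minimal sketch of the program-side call, assuming a cgroup sysctl program (names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int parse_sysctl_value(struct bpf_sysctl *ctx)
{
	char buf[16] = {};
	long val = 0;

	if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) <= 0)
		return 1;	/* not a write, or nothing to parse: allow */

	/* front end of __bpf_strtoll(); returns bytes consumed or -EINVAL/-ERANGE */
	if (bpf_strtol(buf, sizeof(buf), 0, &val) < 0)
		return 0;	/* reject unparseable input */

	return val >= 0 ? 1 : 0;
}

char _license[] SEC("license") = "GPL";
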
592 int err = -EINVAL; in BPF_CALL_4()
605 err = -ENOENT; in BPF_CALL_4()
609 if (!ns_match(&pidns->ns, (dev_t)dev, ino)) in BPF_CALL_4()
612 nsdata->pid = task_pid_nr_ns(task, pidns); in BPF_CALL_4()
613 nsdata->tgid = task_tgid_nr_ns(task, pidns); in BPF_CALL_4()
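
Lines 592-613 implement bpf_get_ns_current_pid_tgid(), which reports pid/tgid relative to a pid namespace identified by device and inode. A sketch of the call; the dev/ino globals would be filled by user space from stat("/proc/self/ns/pid") and are hypothetical names:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* hypothetical: user space fills these before load */
const volatile __u64 pidns_dev;
const volatile __u64 pidns_ino;

SEC("tracepoint/syscalls/sys_enter_getpid")
int ns_ids(void *ctx)
{
	struct bpf_pidns_info ns = {};

	/* fails when the task is not in the expected namespace, as at lines 605-609 */
	if (bpf_get_ns_current_pid_tgid(pidns_dev, pidns_ino, &ns, sizeof(ns)))
		return 0;

	bpf_printk("ns pid=%u tgid=%u", ns.pid, ns.tgid);
	return 0;
}

char _license[] SEC("license") = "GPL";
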
640 return -EINVAL; in BPF_CALL_5()
663 ret = -EFAULT; in BPF_CALL_3()
686 return -EINVAL; in BPF_CALL_5()
696 /* Return -EFAULT for partial read */ in BPF_CALL_5()
697 return ret < 0 ? ret : -EFAULT; in BPF_CALL_5()
761 return -EINVAL; in bpf_trace_copy_string()
764 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
788 return -EBUSY; in try_get_buffers()
790 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); in try_get_buffers()
797 if (!data->bin_args && !data->buf) in bpf_bprintf_cleanup()
806 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
811 * - Format string verification only: when data->get_bin_args is false
812 * - Arguments preparation: in addition to the above verification, it writes in
813 * data->bin_args a binary representation of arguments usable by bstr_printf
822 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; in bpf_bprintf_prepare()
832 return -EINVAL; in bpf_bprintf_prepare()
833 fmt_size = fmt_end - fmt; in bpf_bprintf_prepare()
836 return -EBUSY; in bpf_bprintf_prepare()
838 if (data->get_bin_args) { in bpf_bprintf_prepare()
840 tmp_buf = buffers->bin_args; in bpf_bprintf_prepare()
842 data->bin_args = (u32 *)tmp_buf; in bpf_bprintf_prepare()
845 if (data->get_buf) in bpf_bprintf_prepare()
846 data->buf = buffers->buf; in bpf_bprintf_prepare()
850 err = -EINVAL; in bpf_bprintf_prepare()
863 err = -EINVAL; in bpf_bprintf_prepare()
867 /* The string is zero-terminated so if fmt[i] != 0, we can in bpf_bprintf_prepare()
872 /* skip optional "[0 +-][num]" width formatting field */ in bpf_bprintf_prepare()
873 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || in bpf_bprintf_prepare()
906 (tmp_buf_end - tmp_buf), in bpf_bprintf_prepare()
920 err = -EINVAL; in bpf_bprintf_prepare()
929 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { in bpf_bprintf_prepare()
930 err = -ENOSPC; in bpf_bprintf_prepare()
941 * pre-formatted as strings, ironically, the easiest way in bpf_bprintf_prepare()
944 ip_spec[2] = fmt[i - 1]; in bpf_bprintf_prepare()
946 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, in bpf_bprintf_prepare()
959 err = -EINVAL; in bpf_bprintf_prepare()
967 err = -ENOSPC; in bpf_bprintf_prepare()
974 tmp_buf_end - tmp_buf); in bpf_bprintf_prepare()
989 err = -ENOSPC; in bpf_bprintf_prepare()
1013 err = -EINVAL; in bpf_bprintf_prepare()
1022 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { in bpf_bprintf_prepare()
1023 err = -ENOSPC; in bpf_bprintf_prepare()
1055 return -EINVAL; in BPF_CALL_5()
1058 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we in BPF_CALL_5()
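
bpf_bprintf_prepare() (lines 806-1023) backs the printf-style helpers; line 1058 is from bpf_snprintf(), whose format string must be a read-only constant (ARG_PTR_TO_CONST_STR). A minimal usage sketch with an assumed attach point and format:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_openat2")
int fmt_example(void *ctx)
{
	static const char fmt[] = "pid %d on cpu %u";	/* must live in .rodata */
	char out[64];
	u64 args[2];

	args[0] = bpf_get_current_pid_tgid() >> 32;
	args[1] = bpf_get_smp_processor_id();

	/* args are packed as u64s and rendered via bpf_bprintf_prepare() */
	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
	return 0;
}

char _license[] SEC("license") = "GPL";
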
1105 * ops->map_release_uref callback is responsible for cancelling the timers,
1108 * Inner maps can contain bpf timers as well. ops->map_release_uref is
1137 enum bpf_async_type {
1144 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) in bpf_timer_cb()
1147 struct bpf_map *map = t->cb.map; in bpf_timer_cb()
1148 void *value = t->cb.value; in bpf_timer_cb()
1154 callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held()); in bpf_timer_cb()
1165 if (map->map_type == BPF_MAP_TYPE_ARRAY) { in bpf_timer_cb()
1169 idx = ((char *)value - array->value) / array->elem_size; in bpf_timer_cb()
1172 key = value - round_up(map->key_size, 8); in bpf_timer_cb()
1186 struct bpf_async_cb *cb = &w->cb; in bpf_wq_work()
1187 struct bpf_map *map = cb->map; in bpf_wq_work()
1189 void *value = cb->value; in bpf_wq_work()
1195 callback_fn = READ_ONCE(cb->callback_fn); in bpf_wq_work()
1199 if (map->map_type == BPF_MAP_TYPE_ARRAY) { in bpf_wq_work()
1203 idx = ((char *)value - array->value) / array->elem_size; in bpf_wq_work()
1206 key = value - round_up(map->key_size, 8); in bpf_wq_work()
1222 cancel_work_sync(&w->work); in bpf_wq_delete_work()
1233 * kfree_rcu(t) right after for both preallocated and non-preallocated in bpf_timer_delete_work()
1234 * maps. The async->cb = NULL was already done and no code path can see in bpf_timer_delete_work()
1238 hrtimer_cancel(&t->timer); in bpf_timer_delete_work()
1242 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, in __bpf_async_init() argument
1243 enum bpf_async_type type) in __bpf_async_init()
1253 return -EOPNOTSUPP; in __bpf_async_init()
1263 return -EINVAL; in __bpf_async_init()
1266 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_init()
1267 t = async->timer; in __bpf_async_init()
1269 ret = -EBUSY; in __bpf_async_init()
1274 cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node); in __bpf_async_init()
1276 ret = -ENOMEM; in __bpf_async_init()
1282 clockid = flags & (MAX_CLOCKS - 1); in __bpf_async_init()
1285 atomic_set(&t->cancelling, 0); in __bpf_async_init()
1286 INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); in __bpf_async_init()
1287 hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); in __bpf_async_init()
1288 t->timer.function = bpf_timer_cb; in __bpf_async_init()
1289 cb->value = (void *)async - map->record->timer_off; in __bpf_async_init()
1294 INIT_WORK(&w->work, bpf_wq_work); in __bpf_async_init()
1295 INIT_WORK(&w->delete_work, bpf_wq_delete_work); in __bpf_async_init()
1296 cb->value = (void *)async - map->record->wq_off; in __bpf_async_init()
1299 cb->map = map; in __bpf_async_init()
1300 cb->prog = NULL; in __bpf_async_init()
1301 cb->flags = flags; in __bpf_async_init()
1302 rcu_assign_pointer(cb->callback_fn, NULL); in __bpf_async_init()
1304 WRITE_ONCE(async->cb, cb); in __bpf_async_init()
1305 /* Guarantee the order between async->cb and map->usercnt. So in __bpf_async_init()
1307 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL in __bpf_async_init()
1311 if (!atomic64_read(&map->usercnt)) { in __bpf_async_init()
1315 WRITE_ONCE(async->cb, NULL); in __bpf_async_init()
1317 ret = -EPERM; in __bpf_async_init()
1320 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_init()
1327 clock_t clockid = flags & (MAX_CLOCKS - 1); in BPF_CALL_3()
1338 return -EINVAL; in BPF_CALL_3()
1352 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn, in __bpf_async_set_callback() argument
1354 enum bpf_async_type type) in __bpf_async_set_callback()
1356 struct bpf_prog *prev, *prog = aux->prog; in __bpf_async_set_callback()
1361 return -EOPNOTSUPP; in __bpf_async_set_callback()
1362 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_set_callback()
1363 cb = async->cb; in __bpf_async_set_callback()
1365 ret = -EINVAL; in __bpf_async_set_callback()
1368 if (!atomic64_read(&cb->map->usercnt)) { in __bpf_async_set_callback()
1374 ret = -EPERM; in __bpf_async_set_callback()
1377 prev = cb->prog; in __bpf_async_set_callback()
1380 * can pick different callback_fn-s within the same prog. in __bpf_async_set_callback()
1390 cb->prog = prog; in __bpf_async_set_callback()
1392 rcu_assign_pointer(cb->callback_fn, callback_fn); in __bpf_async_set_callback()
1394 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_set_callback()
1416 enum hrtimer_mode mode; in BPF_CALL_3()
1419 return -EOPNOTSUPP; in BPF_CALL_3()
1421 return -EINVAL; in BPF_CALL_3()
1422 __bpf_spin_lock_irqsave(&timer->lock); in BPF_CALL_3()
1423 t = timer->timer; in BPF_CALL_3()
1424 if (!t || !t->cb.prog) { in BPF_CALL_3()
1425 ret = -EINVAL; in BPF_CALL_3()
1437 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); in BPF_CALL_3()
1439 __bpf_spin_unlock_irqrestore(&timer->lock); in BPF_CALL_3()
1452 static void drop_prog_refcnt(struct bpf_async_cb *async) in drop_prog_refcnt() argument
1454 struct bpf_prog *prog = async->prog; in drop_prog_refcnt()
1458 async->prog = NULL; in drop_prog_refcnt()
1459 rcu_assign_pointer(async->callback_fn, NULL); in drop_prog_refcnt()
1470 return -EOPNOTSUPP; in BPF_CALL_1()
1472 __bpf_spin_lock_irqsave(&timer->lock); in BPF_CALL_1()
1473 t = timer->timer; in BPF_CALL_1()
1475 ret = -EINVAL; in BPF_CALL_1()
1485 ret = -EDEADLK; in BPF_CALL_1()
1489 /* Only account in-flight cancellations when invoked from a timer in BPF_CALL_1()
1491 * are waiting on us, to avoid introducing lockups. Non-callback paths in BPF_CALL_1()
1496 atomic_inc(&t->cancelling); in BPF_CALL_1()
1500 if (atomic_read(&cur_t->cancelling)) { in BPF_CALL_1()
1509 ret = -EDEADLK; in BPF_CALL_1()
1513 drop_prog_refcnt(&t->cb); in BPF_CALL_1()
1515 __bpf_spin_unlock_irqrestore(&timer->lock); in BPF_CALL_1()
1519 ret = ret ?: hrtimer_cancel(&t->timer); in BPF_CALL_1()
1521 atomic_dec(&t->cancelling); in BPF_CALL_1()
1533 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async) in __bpf_async_cancel_and_free() argument
1537 /* Performance optimization: read async->cb without lock first. */ in __bpf_async_cancel_and_free()
1538 if (!READ_ONCE(async->cb)) in __bpf_async_cancel_and_free()
1541 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_cancel_and_free()
1542 /* re-read it under lock */ in __bpf_async_cancel_and_free()
1543 cb = async->cb; in __bpf_async_cancel_and_free()
1550 WRITE_ONCE(async->cb, NULL); in __bpf_async_cancel_and_free()
1552 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_cancel_and_free()
1557 * by ops->map_release_uref when the user space reference to a map reaches zero.
1570 * just return -1). Though callback_fn is still running on this cpu it's in bpf_timer_cancel_and_free()
1573 * since async->cb = NULL was already done. The timer will be in bpf_timer_cancel_and_free()
1597 queue_work(system_unbound_wq, &t->cb.delete_work); in bpf_timer_cancel_and_free()
1599 bpf_timer_delete_work(&t->cb.delete_work); in bpf_timer_cancel_and_free()
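
Lines 1105-1599 are the bpf_timer/bpf_async machinery: __bpf_async_init(), __bpf_async_set_callback(), bpf_timer_start()/bpf_timer_cancel() and the uref-release cleanup. A program-side sketch of that lifecycle; the map layout, attach point and the CLOCK_MONOTONIC define are assumptions:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define CLOCK_MONOTONIC 1	/* uapi clockid value; vmlinux.h carries no macros */

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
	bpf_printk("timer fired");
	return 0;	/* 0: do not re-arm; cf. bpf_timer_cb() above */
}

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(arm_timer)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&timers, &key);

	if (!e)
		return 0;
	if (bpf_timer_init(&e->t, &timers, CLOCK_MONOTONIC))	/* __bpf_async_init() */
		return 0;
	if (bpf_timer_set_callback(&e->t, timer_cb))		/* __bpf_async_set_callback() */
		return 0;
	bpf_timer_start(&e->t, 1000000 /* 1 ms */, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
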
1603 * by ops->map_release_uref when the user space reference to a map reaches zero.
1619 schedule_work(&work->delete_work); in bpf_wq_cancel_and_free()
1644 /* Since the upper 8 bits of dynptr->size is reserved, the
1645 * maximum supported size is 2^24 - 1.
1647 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
1654 return ptr->size & DYNPTR_RDONLY_BIT; in __bpf_dynptr_is_rdonly()
1659 ptr->size |= DYNPTR_RDONLY_BIT; in bpf_dynptr_set_rdonly()
1662 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) in bpf_dynptr_set_type()
1664 ptr->size |= type << DYNPTR_TYPE_SHIFT; in bpf_dynptr_set_type()
1667 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) in bpf_dynptr_get_type()
1669 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; in bpf_dynptr_get_type()
1674 return ptr->size & DYNPTR_SIZE_MASK; in __bpf_dynptr_size()
1679 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; in bpf_dynptr_set_size()
1681 ptr->size = new_size | metadata; in bpf_dynptr_set_size()
1686 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; in bpf_dynptr_check_size()
1690 enum bpf_dynptr_type type, u32 offset, u32 size) in bpf_dynptr_init()
1692 ptr->data = data; in bpf_dynptr_init()
1693 ptr->offset = offset; in bpf_dynptr_init()
1694 ptr->size = size; in bpf_dynptr_init()
1707 if (len > size || offset > size - len) in bpf_dynptr_check_off_len()
1708 return -E2BIG; in bpf_dynptr_check_off_len()
1725 err = -EINVAL; in BPF_CALL_4()
1751 enum bpf_dynptr_type type; in BPF_CALL_5()
1754 if (!src->data || flags) in BPF_CALL_5()
1755 return -EINVAL; in BPF_CALL_5()
1770 memmove(dst, src->data + src->offset + offset, len); in BPF_CALL_5()
1773 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); in BPF_CALL_5()
1775 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); in BPF_CALL_5()
1778 return -EFAULT; in BPF_CALL_5()
1796 enum bpf_dynptr_type type; in BPF_CALL_5()
1799 if (!dst->data || __bpf_dynptr_is_rdonly(dst)) in BPF_CALL_5()
1800 return -EINVAL; in BPF_CALL_5()
1812 return -EINVAL; in BPF_CALL_5()
1817 memmove(dst->data + dst->offset + offset, src, len); in BPF_CALL_5()
1820 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, in BPF_CALL_5()
1824 return -EINVAL; in BPF_CALL_5()
1825 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); in BPF_CALL_5()
1828 return -EFAULT; in BPF_CALL_5()
1845 enum bpf_dynptr_type type; in BPF_CALL_3()
1848 if (!ptr->data) in BPF_CALL_3()
1863 return (unsigned long)(ptr->data + ptr->offset + offset); in BPF_CALL_3()
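
Lines 1644-1863 cover the dynptr metadata bits (size/rdonly/type packed into ptr->size) and the read/write/data paths. A sketch of the corresponding helper calls on a local-memory dynptr; the attach point is an assumption:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_write")
int dynptr_example(void *ctx)
{
	char data[16] = "hello";
	char word[4] = "bpf";
	char out[8] = {};
	struct bpf_dynptr ptr;

	/* BPF_DYNPTR_TYPE_LOCAL dynptr backed by stack memory */
	if (bpf_dynptr_from_mem(data, sizeof(data), 0, &ptr))
		return 0;

	bpf_dynptr_write(&ptr, 0, word, sizeof(word), 0);	/* memmove() path, line 1817 */
	bpf_dynptr_read(out, sizeof(out), &ptr, 0, 0);		/* memmove() path, line 1770 */
	return 0;
}

char _license[] SEC("license") = "GPL";
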
1892 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in bpf_base_func_proto()
1947 if (!bpf_token_capable(prog->aux->token, CAP_BPF)) in bpf_base_func_proto()
2005 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) in bpf_base_func_proto()
2054 if (!head->next || list_empty(head)) in bpf_list_head_free()
2056 head = head->next; in bpf_list_head_free()
2064 obj -= field->graph_root.node_offset; in bpf_list_head_free()
2065 head = head->next; in bpf_list_head_free()
2070 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); in bpf_list_head_free()
2105 obj -= field->graph_root.node_offset; in bpf_rb_root_free()
2109 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); in bpf_rb_root_free()
2126 bpf_obj_init(meta->record, p); in bpf_obj_new_impl()
2143 if (rec && rec->refcount_off >= 0 && in __bpf_obj_drop_impl()
2144 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { in __bpf_obj_drop_impl()
2166 __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false); in bpf_obj_drop_impl()
2183 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); in bpf_refcount_acquire_impl()
2197 struct list_head *n = &node->list_head, *h = (void *)head; in __bpf_list_add()
2199 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't in __bpf_list_add()
2202 if (unlikely(!h->next)) in __bpf_list_add()
2205 /* node->owner != NULL implies !list_empty(n), no need to separately in __bpf_list_add()
2208 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { in __bpf_list_add()
2210 __bpf_obj_drop_impl((void *)n - off, rec, false); in __bpf_list_add()
2211 return -EINVAL; in __bpf_list_add()
2215 WRITE_ONCE(node->owner, head); in __bpf_list_add()
2227 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); in bpf_list_push_front_impl()
2237 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); in bpf_list_push_back_impl()
2245 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't in __bpf_list_del()
2248 if (unlikely(!h->next)) in __bpf_list_del()
2253 n = tail ? h->prev : h->next; in __bpf_list_del()
2255 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) in __bpf_list_del()
2259 WRITE_ONCE(node->owner, NULL); in __bpf_list_del()
2278 struct rb_node *n = &node_internal->rb_node; in bpf_rbtree_remove()
2280 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or in bpf_rbtree_remove()
2283 if (READ_ONCE(node_internal->owner) != root) in bpf_rbtree_remove()
2288 WRITE_ONCE(node_internal->owner, NULL); in bpf_rbtree_remove()
2299 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; in __bpf_rbtree_add()
2300 struct rb_node *parent = NULL, *n = &node->rb_node; in __bpf_rbtree_add()
2304 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately in __bpf_rbtree_add()
2307 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { in __bpf_rbtree_add()
2309 __bpf_obj_drop_impl((void *)n - off, rec, false); in __bpf_rbtree_add()
2310 return -EINVAL; in __bpf_rbtree_add()
2316 link = &parent->rb_left; in __bpf_rbtree_add()
2318 link = &parent->rb_right; in __bpf_rbtree_add()
2325 WRITE_ONCE(node->owner, root); in __bpf_rbtree_add()
2336 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); in bpf_rbtree_add_impl()
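
Lines 2054-2336 are the kernel side of the graph data-structure kfuncs (bpf_obj_new/bpf_obj_drop, list push/pop, rbtree add/remove). A sketch of program-side use, assuming the bpf_experimental.h header and the layout conventions used by the kernel's linked_list selftests:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"	/* from tools/testing/selftests/bpf/ */

struct node_data {
	long payload;
	struct bpf_list_node node;
};

/* global lock + list head, laid out as in the linked_list selftests */
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(node_data, node);

SEC("tc")
int list_example(void *ctx)
{
	struct node_data *n = bpf_obj_new(typeof(*n));	/* bpf_obj_new_impl(), line 2126 */

	if (!n)
		return 0;
	n->payload = 42;

	bpf_spin_lock(&glock);
	bpf_list_push_back(&ghead, &n->node);		/* __bpf_list_add(), line 2197 */
	bpf_spin_unlock(&glock);
	return 0;
}

char _license[] SEC("license") = "GPL";
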
2347 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2354 if (refcount_inc_not_zero(&p->rcu_users)) in bpf_task_acquire()
2360 * bpf_task_release - Release the reference acquired on a task.
2376 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2387 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2405 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2415 if (level > cgrp->level || level < 0) in bpf_cgroup_ancestor()
2419 ancestor = cgrp->ancestors[level]; in bpf_cgroup_ancestor()
2426 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2442 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2467 if (unlikely(idx >= array->map.max_entries)) in BPF_CALL_2()
2468 return -E2BIG; in BPF_CALL_2()
2470 cgrp = READ_ONCE(array->ptrs[idx]); in BPF_CALL_2()
2472 return -EAGAIN; in BPF_CALL_2()
2486 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2506 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
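
Lines 2347-2506 document the task and cgroup acquire/release kfuncs. A sketch using bpf_task_from_pid()/bpf_task_release(); the extern declarations, global variable and attach point follow selftest conventions and are assumptions here:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
extern void bpf_task_release(struct task_struct *p) __ksym;

const volatile int target_pid;	/* hypothetical, set by user space */

SEC("tp_btf/task_newtask")
int watch_task(void *ctx)
{
	struct task_struct *p = bpf_task_from_pid(target_pid);

	if (!p)
		return 0;
	bpf_printk("found tgid=%d", p->tgid);
	bpf_task_release(p);	/* drop the rcu_users reference taken above */
	return 0;
}

char _license[] SEC("license") = "GPL";
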
2525 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2528 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2532 * For non-skb and non-xdp type dynptrs, there is no difference between
2544 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2547 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2556 enum bpf_dynptr_type type; in bpf_dynptr_slice()
2560 if (!ptr->data) in bpf_dynptr_slice()
2572 return ptr->data + ptr->offset + offset; in bpf_dynptr_slice()
2575 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); in bpf_dynptr_slice()
2577 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); in bpf_dynptr_slice()
2580 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); in bpf_dynptr_slice()
2586 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); in bpf_dynptr_slice()
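
A sketch of the bpf_dynptr_slice() semantics documented at lines 2525-2547, assuming an XDP dynptr; the kfunc declarations mirror the selftests' bpf_kfuncs.h and are assumptions here:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, u64 flags,
			       struct bpf_dynptr *ptr) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
			      void *buffer__opt, u32 buffer__szk) __ksym;

SEC("xdp")
int read_eth(struct xdp_md *xdp)
{
	struct bpf_dynptr ptr;
	struct ethhdr buf, *eth;

	if (bpf_dynptr_from_xdp(xdp, 0, &ptr))
		return XDP_PASS;

	/* direct pointer if the bytes are linear, else a copy into buf; NULL on bad range */
	eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
	if (!eth)
		return XDP_PASS;

	bpf_printk("ethertype 0x%x", bpf_ntohs(eth->h_proto));
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
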
2596 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2599 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2603 * For non-skb and non-xdp type dynptrs, there is no difference between
2629 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2642 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) in bpf_dynptr_slice_rdwr()
2647 * For skb-type dynptrs, it is safe to write into the returned pointer in bpf_dynptr_slice_rdwr()
2675 if (!ptr->data || start > end) in bpf_dynptr_adjust()
2676 return -EINVAL; in bpf_dynptr_adjust()
2681 return -ERANGE; in bpf_dynptr_adjust()
2683 ptr->offset += start; in bpf_dynptr_adjust()
2684 bpf_dynptr_set_size(ptr, end - start); in bpf_dynptr_adjust()
2693 return !ptr->data; in bpf_dynptr_is_null()
2700 if (!ptr->data) in bpf_dynptr_is_rdonly()
2710 if (!ptr->data) in bpf_dynptr_size()
2711 return -EINVAL; in bpf_dynptr_size()
2722 if (!ptr->data) { in bpf_dynptr_clone()
2724 return -EINVAL; in bpf_dynptr_clone()
2765 return !ctx->cnt; in bpf_stack_walker()
2767 ctx->cnt++; in bpf_stack_walker()
2770 ctx->aux = prog->aux; in bpf_stack_walker()
2771 ctx->sp = sp; in bpf_stack_walker()
2772 ctx->bp = bp; in bpf_stack_walker()
2783 WARN_ON_ONCE(!ctx.aux->exception_boundary); in bpf_throw()
2791 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); in bpf_throw()
2797 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_init() local
2804 return -EINVAL; in bpf_wq_init()
2806 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); in bpf_wq_init()
2811 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_start() local
2815 return -EOPNOTSUPP; in bpf_wq_start()
2817 return -EINVAL; in bpf_wq_start()
2818 w = READ_ONCE(async->work); in bpf_wq_start()
2819 if (!w || !READ_ONCE(w->cb.prog)) in bpf_wq_start()
2820 return -EINVAL; in bpf_wq_start()
2822 schedule_work(&w->work); in bpf_wq_start()
2832 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_set_callback_impl() local
2835 return -EINVAL; in bpf_wq_set_callback_impl()
2837 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); in bpf_wq_set_callback_impl()
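
Lines 2797-2837 are the bpf_wq kfuncs built on the same __bpf_async_* core as timers. A sketch of deferring work from a program, assuming the bpf_wq declarations and callback signature used by the kernel selftests' bpf_experimental.h:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"	/* bpf_wq kfunc declarations, selftests */

struct elem {
	struct bpf_wq work;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} wq_map SEC(".maps");

static int wq_cb(void *map, int *key, void *value)
{
	bpf_printk("deferred work ran");
	return 0;
}

SEC("tc")
int defer_work(void *ctx)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&wq_map, &key);

	if (!e)
		return 0;
	if (bpf_wq_init(&e->work, &wq_map, 0))		/* __bpf_async_init(), BPF_ASYNC_TYPE_WQ */
		return 0;
	if (bpf_wq_set_callback(&e->work, wq_cb, 0))	/* bpf_wq_set_callback_impl() */
		return 0;
	bpf_wq_start(&e->work, 0);			/* schedule_work() on the wq, line 2822 */
	return 0;
}

char _license[] SEC("license") = "GPL";
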
2865 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
2867 * return the same result, as both point to the same 8-byte area.
2869 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
2871 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
2872 * long is composed of bits 32-63 of the u64.
2874 * However, for 32-bit big-endian hosts, this is not the case. The first
2875 * iterated unsigned long will be bits 32-63 of the u64, so swap these two
2889 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
2892 * @nr_words: The size of the specified memory area, measured in 8-byte units.
2915 kit->nr_bits = 0; in bpf_iter_bits_new()
2916 kit->bits_copy = 0; in bpf_iter_bits_new()
2917 kit->bit = -1; in bpf_iter_bits_new()
2920 return -EINVAL; in bpf_iter_bits_new()
2922 return -E2BIG; in bpf_iter_bits_new()
2926 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); in bpf_iter_bits_new()
2928 return -EFAULT; in bpf_iter_bits_new()
2930 swap_ulong_in_u64(&kit->bits_copy, nr_words); in bpf_iter_bits_new()
2932 kit->nr_bits = nr_bits; in bpf_iter_bits_new()
2937 return -E2BIG; in bpf_iter_bits_new()
2940 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); in bpf_iter_bits_new()
2941 if (!kit->bits) in bpf_iter_bits_new()
2942 return -ENOMEM; in bpf_iter_bits_new()
2944 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); in bpf_iter_bits_new()
2946 bpf_mem_free(&bpf_global_ma, kit->bits); in bpf_iter_bits_new()
2950 swap_ulong_in_u64(kit->bits, nr_words); in bpf_iter_bits_new()
2952 kit->nr_bits = nr_bits; in bpf_iter_bits_new()
2957 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
2968 int bit = kit->bit, nr_bits = kit->nr_bits; in bpf_iter_bits_next()
2974 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; in bpf_iter_bits_next()
2977 kit->bit = bit; in bpf_iter_bits_next()
2981 kit->bit = bit; in bpf_iter_bits_next()
2982 return &kit->bit; in bpf_iter_bits_next()
2986 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
2995 if (kit->nr_bits <= 64) in bpf_iter_bits_destroy()
2997 bpf_mem_free(&bpf_global_ma, kit->bits); in bpf_iter_bits_destroy()
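
Lines 2889-2997 implement the open-coded bits iterator. A sketch of iterating it from a program; the extern kfunc declarations and the attach point are assumptions:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_iter_bits_new(struct bpf_iter_bits *it,
			     const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
extern int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
extern void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

SEC("syscall")
int count_set_bits(void *ctx)
{
	u64 mask = 0xf0f0f0f0ULL;	/* a kernel address of a real bitmap also works */
	struct bpf_iter_bits it;
	int *bit, cnt = 0;

	bpf_iter_bits_new(&it, &mask, 1);	/* nr_words is in 8-byte units (line 2892) */
	while ((bit = bpf_iter_bits_next(&it)))
		cnt++;
	bpf_iter_bits_destroy(&it);

	bpf_printk("%d bits set", cnt);
	return 0;
}

char _license[] SEC("license") = "GPL";
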
3001 * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3008 * Copies a NUL-terminated string from userspace to BPF space. If user string is
3020 return -EINVAL; in bpf_copy_from_user_str()
3025 ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1); in bpf_copy_from_user_str()
3034 memset((char *)dst + ret, 0, dst__sz - ret); in bpf_copy_from_user_str()
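
Lines 3001-3034 implement bpf_copy_from_user_str(), a sleepable kfunc. A sketch of calling it from a sleepable (syscall) program; the context struct and user pointer source are hypothetical and would be supplied by the caller of BPF_PROG_TEST_RUN:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_copy_from_user_str(void *dst, u32 dst__sz,
				  const void *unsafe_ptr__ign, u64 flags) __ksym;

/* hypothetical context for a BPF_PROG_TYPE_SYSCALL program */
struct run_args {
	__u64 user_str_addr;	/* user-space address supplied by the caller */
};

SEC("syscall")
int copy_str(struct run_args *ctx)
{
	char buf[64] = {};
	int n;

	/* pass BPF_F_PAD_ZEROS instead of 0 to also zero-fill the tail of buf (line 3034) */
	n = bpf_copy_from_user_str(buf, sizeof(buf),
				   (const void *)(unsigned long)ctx->user_str_addr, 0);
	if (n < 0)
		return n;

	bpf_printk("copied %d bytes: %s", n, buf);
	return 0;
}

char _license[] SEC("license") = "GPL";
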