/linux-6.12.1/io_uring/

D | tctx.c
     49  struct io_uring_task *tctx = tsk->io_uring;        in __io_uring_free()
     57  tsk->io_uring = NULL;                               in __io_uring_free()
     88  task->io_uring = tctx;                              in io_uring_alloc_task_context()
     96  struct io_uring_task *tctx = current->io_uring;     in __io_uring_add_tctx_node()
    105  tctx = current->io_uring;                           in __io_uring_add_tctx_node()
    148  current->io_uring->last = ctx;                      in __io_uring_add_tctx_node_from_submit()
    157  struct io_uring_task *tctx = current->io_uring;     in io_uring_del_tctx_node()
    200  struct io_uring_task *tctx = current->io_uring;     in io_uring_unreg_ringfd()
    270  tctx = current->io_uring;                           in io_ringfd_register()
    316  struct io_uring_task *tctx = current->io_uring;     in io_ringfd_unregister()
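The tctx.c hits above trace one lifecycle: io_uring_alloc_task_context()
installs the per-task context in task->io_uring, and __io_uring_free()
tears it down at task exit. A minimal sketch of the lazy-init pattern
visible at lines 96/105 (simplified; the xarray node tracking and error
unwinding are elided, so treat this as illustration rather than the
verbatim kernel source):

    int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
    {
        struct io_uring_task *tctx = current->io_uring;

        if (unlikely(!tctx)) {
            /* first submission from this task: allocate the context */
            int ret = io_uring_alloc_task_context(current, ctx);

            if (unlikely(ret))
                return ret;
            tctx = current->io_uring;  /* set by the alloc above */
        }
        /* ... link ctx into tctx so cancel/exit paths can find it ... */
        return 0;
    }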
D | cancel.c
    109  WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);   in io_try_cancel()
    187  ret = io_async_cancel_one(node->task->io_uring, cd);                     in __io_async_cancel()
    208  struct io_uring_task *tctx = req->task->io_uring;                        in io_async_cancel()
    301  ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);                   in io_sync_cancel()
    325  ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);                   in io_sync_cancel()
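These paths service IORING_OP_ASYNC_CANCEL plus the synchronous
IORING_REGISTER_SYNC_CANCEL variant (lines 301/325). From userspace the
asynchronous form looks roughly like this sketch (raw SQE fill, no prep
helper assumed; `ring` is a placeholder handle):

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    sqe->opcode    = IORING_OP_ASYNC_CANCEL;
    sqe->addr      = 42;   /* user_data of the request to cancel */
    sqe->user_data = 43;   /* identifies the cancel's own completion */
    io_uring_submit(&ring);
    /* the cancel CQE carries 0 on success, -ENOENT if no match,
     * -EALREADY if the target had already started executing */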
D | register.c
    205  ret = io_wq_cpu_affinity(current->io_uring, new_mask);   in __io_register_iowq_aff()
    281  tctx = sqd->thread->io_uring;                            in io_register_iowq_max_workers()
    284  tctx = current->io_uring;                                in io_register_iowq_max_workers()
    318  tctx = node->task->io_uring;                             in io_register_iowq_max_workers()
    573  struct io_uring_task *tctx = current->io_uring;          in io_uring_register_get_file()
D | sqpoll.c
    233  struct io_uring_task *tctx = current->io_uring;          in io_sq_tw()
    251  struct io_uring_task *tctx = current->io_uring;          in io_sq_tw_pending()
    278  if (!current->io_uring)                                  in io_sq_thread()
    523  ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);   in io_sqpoll_wq_cpu_affinity()
D | tctx.h
     27  struct io_uring_task *tctx = current->io_uring;   in io_uring_add_tctx_node()
D | io_uring.h
    327  if (current->io_uring) {                                   in io_run_task_work()
    331  tctx_task_work_run(current->io_uring, UINT_MAX, &count);   in io_run_task_work()
    379  struct io_uring_task *tctx = current->io_uring;            in io_get_task_refs()
D | Makefile
      9  obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
D | io_uring.c
    409  struct io_uring_task *tctx = req->task->io_uring;     in io_clean_op()
    426  atomic_inc(&req->task->io_uring->inflight_tracked);   in io_req_track_inflight()
    515  struct io_uring_task *tctx = req->task->io_uring;     in io_queue_iowq()
    681  struct io_uring_task *tctx = task->io_uring;          in io_put_task_remote()
    692  task->io_uring->cached_refs++;                        in io_put_task_local()
    715  struct io_uring_task *tctx = task->io_uring;          in io_uring_drop_tctx_refs()
   1231  struct io_uring_task *tctx = req->task->io_uring;     in io_req_normal_work_add()
   2336  current->io_uring->cached_refs += left;               in io_submit_sqes()
   2375  struct io_uring_task *tctx = current->io_uring;       in current_pending_io()
   2866  struct io_uring_task *tctx = current->io_uring;       in io_tctx_exit_cb()
    [all …]
D | timeout.c
    312  ret = io_try_cancel(req->task->io_uring, &cd, 0);   in io_req_task_link_timeout()
/linux-6.12.1/include/linux/

D | io_uring.h
     18  if (current->io_uring) {   in io_uring_files_cancel()
     25  if (current->io_uring)     in io_uring_task_cancel()
     30  if (tsk->io_uring)         in io_uring_free()
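All three hits share the same guard: each helper is a no-op unless the
task ever submitted to io_uring, so the common exit path pays only a
NULL-pointer check. A sketch of the shape (close to, but not guaranteed
to be, the verbatim 6.12 source):

    static inline void io_uring_files_cancel(void)
    {
        if (current->io_uring) {       /* task never used io_uring? done */
            io_uring_unreg_ringfd();
            __io_uring_cancel(false);  /* cancel this task's requests */
        }
    }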
D | io_uring_types.h
    103  struct io_uring {         struct
    124  struct io_uring sq, cq;   argument
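Note the name clash these two hits resolve: the kernel-internal
struct io_uring is not liburing's ring handle but a bare index pair,
embedded twice in struct io_ring_ctx as the "sq, cq" hit shows.
Roughly (a sketch of its shape, not a quote of the header):

    struct io_uring {
        u32 head;   /* consumer index into the ring */
        u32 tail;   /* producer index into the ring */
    };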
D | sched.h
   1147  struct io_uring_task *io_uring;   member
/linux-6.12.1/tools/include/io_uring/

D | mini_liburing.h
     54  struct io_uring {                                                             struct
    130  struct io_uring *ring,                                                        in io_uring_queue_init()
    152  static inline struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)   in io_uring_get_sqe()
    161  static inline int io_uring_wait_cqe(struct io_uring *ring,                    in io_uring_wait_cqe()
    185  static inline int io_uring_submit(struct io_uring *ring)                     in io_uring_submit()
    220  static inline void io_uring_queue_exit(struct io_uring *ring)                in io_uring_queue_exit()
    247  static inline int io_uring_register_buffers(struct io_uring *ring,           in io_uring_register_buffers()
    278  static inline void io_uring_cqe_seen(struct io_uring *ring)                  in io_uring_cqe_seen()
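mini_liburing.h is a self-contained stand-in for liburing used by a few
selftests (see the vsock, zerocopy and sockopt hits below). Assuming
the signatures shown in these hits and an include path of
tools/include (both assumptions), a round trip through the helpers
could look like this sketch, submitting a single NOP:

    #include <stdio.h>
    #include "io_uring/mini_liburing.h"  /* assumed: built with -I tools/include */

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(8, &ring, 0) < 0)
            return 1;

        sqe = io_uring_get_sqe(&ring);
        sqe->opcode = IORING_OP_NOP;   /* completes without doing any IO */
        sqe->user_data = 42;

        io_uring_submit(&ring);        /* io_uring_enter() underneath */
        io_uring_wait_cqe(&ring, &cqe);
        printf("res=%d data=%llu\n", cqe->res,
               (unsigned long long)cqe->user_data);
        io_uring_cqe_seen(&ring);      /* advance the CQ head */

        io_uring_queue_exit(&ring);
        return 0;
    }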
/linux-6.12.1/Documentation/block/

D | ublk.rst
     47  ``io_uring`` passthrough command; that is why ublk is also one io_uring based
     48  block driver. It has been observed that using io_uring passthrough command can
     51  done by io_uring, but also the preferred IO handling in ublk server is io_uring
     76  # do anything. all IOs are handled by io_uring
    119  pthread & io_uring for handling ublk IO), this command is sent to the
    127  io_uring).
    232  ublk server needs to create per-queue IO pthread & io_uring for handling IO
    233  commands via io_uring passthrough. The per-queue IO pthread
    246  The following IO commands are communicated via io_uring passthrough command,
    262  the IO notification via io_uring.
/linux-6.12.1/tools/testing/vsock/

D | vsock_uring_test.c
     64  struct io_uring ring;   in vsock_io_uring_client()
    117  struct io_uring ring;   in vsock_io_uring_server()
/linux-6.12.1/

D | Kbuild
     91  obj-$(CONFIG_IO_URING) += io_uring/
/linux-6.12.1/init/

D | init_task.c
    128  .io_uring = NULL,
/linux-6.12.1/tools/testing/selftests/net/

D | io_uring_zerocopy_tx.c
     99  struct io_uring ring;   in do_tx()
/linux-6.12.1/tools/testing/selftests/mm/

D | gup_longterm.c
    191  struct io_uring ring;   in do_test()
/linux-6.12.1/drivers/block/

D | Kconfig
    386  io_uring based userspace block driver. Together with ublk server, ublk
    392  can be used in IO path for replacing io_uring cmd, which will become
    406  may help security subsystem to audit io_uring command.
/linux-6.12.1/Documentation/core-api/

D | protection-keys.rst
    118  Note that kernel accesses from a kthread (such as io_uring) will use a default
/linux-6.12.1/Documentation/admin-guide/sysctl/

D | kernel.rst
    471  Prevents all processes from creating new io_uring instances. Enabling this
    475  0  All processes can create io_uring instances as normal. This is the
    477  1  io_uring creation is disabled (io_uring_setup() will fail with
    479     Existing io_uring instances can still be used. See the
    481  2  io_uring creation is disabled for all processes. io_uring_setup()
    482     always fails with -EPERM. Existing io_uring instances can still be
    492  to create an io_uring instance. If io_uring_group is set to -1 (the
    494  io_uring instances.
/linux-6.12.1/include/trace/events/

D | io_uring.h
      3  #define TRACE_SYSTEM io_uring
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/

D | sockopt.c
    997  struct io_uring ring;   in uring_sockopt()
/linux-6.12.1/Documentation/networking/

D | napi.rst
    207  ``net.core.busy_read`` sysctls. An io_uring API for NAPI busy polling
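The API this fragment refers to is the IORING_REGISTER_NAPI opcode
(added in the 6.8 cycle, so present in 6.12). A hedged sketch of
enabling busy polling on an existing ring via the raw registration
syscall; the struct io_uring_napi layout here is an assumption about
the 6.12 uapi:

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/io_uring.h>

    /* ring_fd: fd returned by io_uring_setup() */
    static int enable_napi_busy_poll(int ring_fd, unsigned busy_poll_usec)
    {
        struct io_uring_napi napi = {
            .busy_poll_to     = busy_poll_usec,  /* poll timeout, usec */
            .prefer_busy_poll = 1,
        };

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_NAPI, &napi, 1);
    }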