Searched refs:bpf_perf_event_output (Results 1 – 15 of 15) sorted by relevance
/linux-6.12.1/tools/testing/selftests/bpf/progs/

  test_get_stack_rawtp.c
    80   bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));  in bpf_prog1()
    97   bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);  in bpf_prog1()

  perfbuf_bench.c
    28   if (bpf_perf_event_output(ctx, &perfbuf, BPF_F_CURRENT_CPU,  in bench_perfbuf()

  test_xdp_attach_fail.c
    45   bpf_perf_event_output(ctx, &xdp_errmsg_pb, BPF_F_CURRENT_CPU, &errmsg,  in tp__xdp__bpf_xdp_link_attach_failed()

  test_perf_buffer.c
    36   bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,  in handle_sys_enter()

  profiler.inc.h
    592  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, sysctl_data, data_len);  in BPF_KPROBE()
    667  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, kill_data, data_len);  in raw_tracepoint__sched_process_exit()
    748  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, proc_exec_data, data_len);  in raw_tracepoint__sched_process_exec()
    811  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);  in kprobe_ret__do_filp_open()
    871  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);  in BPF_KPROBE()
    925  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);  in BPF_KPROBE()
    955  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, fork_data, data_len);  in raw_tracepoint__sched_process_fork()

  test_tcpnotify_kern.c
    82   bpf_perf_event_output(skops, &perf_event_map,  in bpf_testcb()

  test_unpriv_bpf_disabled.c
    71   bpf_perf_event_output(ctx, &perfbuf, BPF_F_CURRENT_CPU, &perfbuf_val, sizeof(perfbuf_val));  in sys_nanosleep_enter()

  pyperf.h
    342  bpf_perf_event_output(ctx, &perfmap, 0, event, offsetof(Event, metadata));

  strobemeta.h
    631  bpf_perf_event_output(ctx, &samples, 0, sample, 1 + sample_size);  in on_event()
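All of the selftest hits above follow the same shape: a BPF_MAP_TYPE_PERF_EVENT_ARRAY map plus a bpf_perf_event_output(ctx, &map, flags, data, size) call, where the lower 32 bits of flags pick the map slot (a fixed index such as 0, or BPF_F_CURRENT_CPU for the running CPU's ring). Below is a minimal sketch of that pattern, not taken from any of the files listed; it assumes a libbpf-style build with vmlinux.h and bpf_helpers.h, and the struct, map, and section names are illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct event {
	__u32 pid;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int handle_enter(void *ctx)
{
	struct event e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(&e.comm, sizeof(e.comm));

	/* BPF_F_CURRENT_CPU selects the ring of the CPU the program runs on. */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}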
/linux-6.12.1/samples/bpf/

  trace_output.bpf.c
    23   bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));  in bpf_prog1()
/linux-6.12.1/tools/bpf/runqslower/

  runqslower.bpf.c
    100  bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,  in handle__sched_switch()
/linux-6.12.1/tools/perf/util/bpf_skel/

  augmented_raw_syscalls.bpf.c
    157  return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, args, len);  in augmented__output()
    162  return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, data, len);  in augmented__beauty_output()
/linux-6.12.1/Documentation/bpf/

  ringbuf.rst
    87   buffer, similarly to ``bpf_perf_event_output()``;
    99   closely matches ``bpf_perf_event_output()``, so will simplify migration
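ringbuf.rst is saying that bpf_ringbuf_output() deliberately mirrors bpf_perf_event_output(): the (data, size) pair carries over unchanged, and the per-CPU index in flags goes away because the ring buffer is a single buffer shared across CPUs. A hedged sketch of that counterpart to the calls above; the map, struct, and section names are again illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct rb_event {
	__u32 pid;
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);   /* size in bytes, must be a power of 2 */
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int handle_enter_rb(void *ctx)
{
	struct rb_event e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;

	/* Same (data, size) shape as bpf_perf_event_output(), but no
	 * BPF_F_CURRENT_CPU: the ring buffer is shared across CPUs. */
	bpf_ringbuf_output(&rb, &e, sizeof(e), 0);
	return 0;
}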
/linux-6.12.1/tools/bpf/bpftool/Documentation/

  bpftool-map.rst
    125  **bpf_perf_event_output**\ () call in the kernel. By default read the
/linux-6.12.1/kernel/trace/

  bpf_trace.c
    660  BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,  in BPF_CALL_5() argument
    700  .func = bpf_perf_event_output,
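The bpf_trace.c hits are the kernel side of the helper: BPF_CALL_5 expands into the five-argument helper body, and the .func assignment wires it into the helper's proto struct. On the user-space side, data emitted with bpf_perf_event_output() is typically drained through libbpf's perf_buffer API. The consumer below is a sketch, assuming libbpf >= 1.0 (the six-argument perf_buffer__new() signature) and an already-obtained file descriptor for the perf event array map; the callback bodies are placeholders.

#include <stdio.h>
#include <linux/types.h>
#include <bpf/libbpf.h>

static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* 'data' points at whatever struct the BPF program emitted. */
	printf("cpu %d: %u bytes\n", cpu, size);
}

static void handle_lost(void *ctx, int cpu, __u64 lost)
{
	fprintf(stderr, "cpu %d: lost %llu samples\n", cpu, (unsigned long long)lost);
}

int consume_events(int map_fd)
{
	struct perf_buffer *pb;
	int err;

	/* 8 pages of ring per CPU; the callbacks above handle samples and drops. */
	pb = perf_buffer__new(map_fd, 8, handle_sample, handle_lost, NULL, NULL);
	if (!pb)
		return -1;

	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		;

	perf_buffer__free(pb);
	return err;
}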