Matching lines from kernel/trace/trace_functions_graph.c (function graph tracer)

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
16 #include "trace.h"
45 /* Display overruns? (for self-debug purpose) */
46 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
50 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
52 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
54 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
56 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
58 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
60 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
63 { TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
65 { TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
68 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
72 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
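
These TRACER_OPT flags surface as boolean files under tracefs. A minimal userspace sketch for toggling them, assuming tracefs is mounted at /sys/kernel/tracing (older systems use /sys/kernel/debug/tracing) and the program runs as root; set_graph_opt is an illustrative helper, not a kernel or library API:

#include <stdio.h>

/* Hypothetical helper: write "0" or "1" to one of the option files above. */
static int set_graph_opt(const char *opt, int on)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/tracing/options/%s", opt);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d", !!on);
	fclose(f);
	return 0;
}

int main(void)
{
	set_graph_opt("funcgraph-proc", 1);	/* show comm-pid on each line */
	set_graph_opt("funcgraph-duration", 1);	/* show time spent per function */
	set_graph_opt("funcgraph-irqs", 0);	/* hide functions run in irq context */
	return 0;
}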
in __trace_graph_entry():
102 struct ftrace_graph_ent *trace,
107 struct trace_buffer *buffer = tr->array_buffer.buffer;
110 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
115 entry->graph_ent = *trace;
116 if (!call_filter_check_discard(call, entry, buffer, event))
117 trace_buffer_unlock_commit_nostack(buffer, event);
in trace_graph_entry():
130 int trace_graph_entry(struct ftrace_graph_ent *trace,
134 struct trace_array *tr = gops->private;
146 * Do not trace a function if it's filtered by set_graph_notrace.
152 if (ftrace_graph_notrace_addr(trace->func)) {
164 if (ftrace_graph_ignore_func(gops, trace))
172 * events to the ring buffer.
179 data = per_cpu_ptr(tr->array_buffer.data, cpu);
180 disabled = atomic_inc_return(&data->disabled);
183 ret = __trace_graph_entry(tr, trace, trace_ctx);
188 atomic_dec(&data->disabled);
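
The atomic_inc_return()/atomic_dec() pair around __trace_graph_entry() is a per-CPU re-entrancy guard: the event is written only when this CPU was not already inside the tracer. A rough userspace analogue of the same pattern, using C11 atomics with thread-local storage standing in for per-CPU data (illustrative only, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Thread-local counter standing in for data->disabled (per-CPU in the kernel). */
static _Thread_local atomic_int disabled;

static void emit_event(const char *fn)
{
	/* Record only when this is the outermost (non-nested) entry. */
	if (atomic_fetch_add(&disabled, 1) + 1 == 1)
		printf("enter %s\n", fn);
	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	emit_event("do_sys_open");
	return 0;
}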
in __trace_graph_return():
223 struct ftrace_graph_ret *trace,
228 struct trace_buffer *buffer = tr->array_buffer.buffer;
231 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
236 entry->ret = *trace;
237 if (!call_filter_check_discard(call, entry, buffer, event))
238 trace_buffer_unlock_commit_nostack(buffer, event);
in trace_graph_return():
241 void trace_graph_return(struct ftrace_graph_ret *trace,
245 struct trace_array *tr = gops->private;
252 ftrace_graph_addr_finish(gops, trace);
261 data = per_cpu_ptr(tr->array_buffer.data, cpu);
262 disabled = atomic_inc_return(&data->disabled);
265 __trace_graph_return(tr, trace, trace_ctx);
267 atomic_dec(&data->disabled);
in trace_graph_thresh_return():
271 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
274 ftrace_graph_addr_finish(gops, trace);
282 (trace->rettime - trace->calltime < tracing_thresh))
285 trace_graph_return(trace, gops);
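
trace_graph_thresh_return() drops return events whose duration (rettime - calltime) falls under tracing_thresh, so only slow functions are recorded. The threshold is set in microseconds through tracefs; a small sketch, assuming the usual /sys/kernel/tracing mount:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/tracing_thresh", "w");

	if (!f)
		return 1;
	fprintf(f, "100");	/* record only functions slower than 100 us */
	fclose(f);
	return 0;
}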
in allocate_fgraph_ops():
299 return -ENOMEM;
301 gops->entryfunc = &trace_graph_entry;
302 gops->retfunc = &trace_graph_return;
304 tr->gops = gops;
305 gops->private = tr;
307 fgraph_init_ops(&gops->ops, ops);
in free_fgraph_ops():
314 kfree(tr->gops);

in init_array_fgraph_ops():
319 tr->gops = &funcgraph_ops;
321 fgraph_init_ops(&tr->gops->ops, ops);

in graph_trace_init():
328 tr->gops->entryfunc = trace_graph_entry;
331 tr->gops->retfunc = trace_graph_thresh_return;
333 tr->gops->retfunc = trace_graph_return;
338 ret = register_ftrace_graph(tr->gops);

in graph_trace_reset():
349 unregister_ftrace_graph(tr->gops);
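
register_ftrace_graph()/unregister_ftrace_graph() take a struct fgraph_ops whose entryfunc/retfunc callbacks fire on every traced function entry and return. A hedged sketch of a standalone module using the same API, with callback signatures matching the two-argument form seen in this listing (older kernels pass only the trace argument, so check your kernel's headers); my_graph_entry/my_graph_return are illustrative names:

#include <linux/module.h>
#include <linux/ftrace.h>

static int my_graph_entry(struct ftrace_graph_ent *trace,
			  struct fgraph_ops *gops)
{
	return 1;	/* non-zero: record this function's entry and return */
}

static void my_graph_return(struct ftrace_graph_ret *trace,
			    struct fgraph_ops *gops)
{
	/* trace->rettime - trace->calltime is the duration in ns */
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_graph_entry,
	.retfunc	= my_graph_return,
};

static int __init my_fgraph_init(void)
{
	return register_ftrace_graph(&my_gops);
}

static void __exit my_fgraph_exit(void)
{
	unregister_ftrace_graph(&my_gops);
}

module_init(my_fgraph_init);
module_exit(my_fgraph_exit);
MODULE_LICENSE("GPL");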
in print_graph_cpu():
363 * Start with a space character - to make it stand out
364 * to the right a bit when trace output is pasted into

in print_graph_proc():
385 /* 1 stands for the "-" character */
389 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
395 trace_seq_printf(s, "%s-%s", comm, pid_str);
398 for (i = 0; i < spaces - (spaces / 2); i++)
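
print_graph_proc() centers the "comm-pid" string in a TRACE_GRAPH_PROCINFO_LENGTH-wide column, putting spaces/2 blanks on the left and the remainder (spaces - spaces/2) on the right, so odd padding leans right. A small userspace illustration of the same split (names and width here are arbitrary):

#include <stdio.h>

/* Center "comm-pid" in a fixed-width column; odd padding goes to the right. */
static void print_proc_field(const char *comm, int pid, int width)
{
	char buf[64];
	int len, spaces, i;

	len = snprintf(buf, sizeof(buf), "%s-%d", comm, pid);
	spaces = width > len ? width - len : 0;

	for (i = 0; i < spaces / 2; i++)
		putchar(' ');
	fputs(buf, stdout);
	for (i = 0; i < spaces - (spaces / 2); i++)
		putchar(' ');
}

int main(void)
{
	print_proc_field("sshd", 1755, 14);
	puts("|");
	return 0;
}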
in verif_pid():
410 /* If the pid changed since the last trace, output this event */
420 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
428 if (prev_pid == -1)
431 * Context-switch trace line:
433 ------------------------------------------
434 | 1) migration/0--1 => sshd-1755
435 ------------------------------------------
438 trace_seq_puts(s, " ------------------------------------------\n");
443 trace_seq_puts(s, "\n ------------------------------------------\n\n");
in get_return_for_leaf():
450 struct fgraph_data *data = iter->private;
456 * If the previous output failed to write to the seq buffer,
459 if (data && data->failed) {
460 curr = &data->ent;
461 next = &data->ret;
464 ring_iter = trace_buffer_iter(iter, iter->cpu);
474 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
476 event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
490 data->ent = *curr;
496 if (next->ent.type == TRACE_GRAPH_RET)
497 data->ret = *next;
499 data->ret.ent.type = next->ent.type;
503 if (next->ent.type != TRACE_GRAPH_RET)
506 if (curr->ent.pid != next->ent.pid ||
507 curr->graph_ent.func != next->ret.func)
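
get_return_for_leaf() peeks at the next ring-buffer event without consuming it: if that event is the matching return for the current entry, the pair is collapsed into a single "leaf" line such as "func();" with its duration, instead of an open "func() {" followed later by a closing "}". The final check reduces to a predicate like the following sketch (struct fields are simplified stand-ins, not the real event layouts):

#include <stdbool.h>

struct ent_stub { int pid; unsigned long func; };	/* entry event, simplified */
struct ret_stub { int pid; unsigned long func; };	/* return event, simplified */

/*
 * An entry collapses into a one-line leaf only when the very next event is
 * the return of the same function in the same task.
 */
static bool is_leaf(const struct ent_stub *curr, const struct ret_stub *next)
{
	return curr->pid == next->pid && curr->func == next->func;
}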
in print_graph_rel_time():
533 usecs = iter->ts - iter->array_buffer->time_start;

in print_graph_irq():
543 struct trace_array *tr = iter->tr;
544 struct trace_seq *s = &iter->seq;
545 struct trace_entry *ent = iter->ent;
547 addr += iter->tr->text_delta;
553 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
556 print_graph_abs_time(iter->ts, s);
573 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)

in trace_print_graph_duration():
608 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

in print_graph_duration():
627 !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))

in print_graph_retval():
668 /* sign extension */
in print_graph_entry_leaf():
710 struct fgraph_data *data = iter->private;
711 struct trace_array *tr = iter->tr;
716 int cpu = iter->cpu;
719 graph_ret = &ret_entry->ret;
720 call = &entry->graph_ent;
721 duration = graph_ret->rettime - graph_ret->calltime;
723 func = call->func + iter->tr->text_delta;
728 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
735 cpu_data->depth = call->depth - 1;
738 if (call->depth < FTRACE_RETFUNC_DEPTH &&
739 !WARN_ON_ONCE(call->depth < 0))
740 cpu_data->enter_funcs[call->depth] = 0;
747 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
751 * Write out the function return value if the option function-retval is
755 print_graph_retval(s, graph_ret->retval, true, (void *)func,
760 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
761 cpu, iter->ent->pid, flags);
in print_graph_entry_nested():
771 struct ftrace_graph_ent *call = &entry->graph_ent;
772 struct fgraph_data *data = iter->private;
773 struct trace_array *tr = iter->tr;
779 int cpu = iter->cpu;
781 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
782 cpu_data->depth = call->depth;
785 if (call->depth < FTRACE_RETFUNC_DEPTH &&
786 !WARN_ON_ONCE(call->depth < 0))
787 cpu_data->enter_funcs[call->depth] = call->func;
794 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
797 func = call->func + iter->tr->text_delta;
in print_graph_prologue():
815 struct fgraph_data *data = iter->private;
816 struct trace_entry *ent = iter->ent;
817 struct trace_array *tr = iter->tr;
818 int cpu = iter->cpu;
821 verif_pid(s, ent->pid, cpu, data);
825 print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
827 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
832 print_graph_abs_time(iter->ts, s);
844 print_graph_proc(s, ent->pid);
849 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
859 * - we are inside irq code
860 * - we just entered irq code
863 * - funcgraph-interrupts option is set
864 * - we are not inside irq code
in check_irq_entry():
870 int cpu = iter->cpu;
872 struct fgraph_data *data = iter->private;
874 addr += iter->tr->text_delta;
885 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
908 * - we are inside irq code
909 * - we just left irq code
912 * - funcgraph-interrupts option is set
913 * - we are not inside irq code
in check_irq_return():
918 int cpu = iter->cpu;
920 struct fgraph_data *data = iter->private;
931 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
936 if (*depth_irq == -1)
941 * Let's not trace it and clear the entry depth, since
949 *depth_irq = -1;
in print_graph_entry():
963 struct fgraph_data *data = iter->private;
964 struct ftrace_graph_ent *call = &field->graph_ent;
967 int cpu = iter->cpu;
969 if (check_irq_entry(iter, flags, call->func, call->depth))
972 print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
985 if (s->full) {
986 data->failed = 1;
987 data->cpu = cpu;
989 data->failed = 0;
in print_graph_return():
996 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1000 unsigned long long duration = trace->rettime - trace->calltime;
1001 struct fgraph_data *data = iter->private;
1002 struct trace_array *tr = iter->tr;
1004 pid_t pid = ent->pid;
1005 int cpu = iter->cpu;
1009 func = trace->func + iter->tr->text_delta;
1011 if (check_irq_return(iter, flags, trace->depth))
1016 int cpu = iter->cpu;
1018 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1025 cpu_data->depth = trace->depth - 1;
1027 if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1028 !WARN_ON_ONCE(trace->depth < 0)) {
1029 if (cpu_data->enter_funcs[trace->depth] != trace->func)
1031 cpu_data->enter_funcs[trace->depth] = 0;
1041 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1046 * function-retval option is enabled.
1049 print_graph_retval(s, trace->retval, false, (void *)func,
1057 * that if the funcgraph-tail option is enabled.
1068 trace->overrun);
1070 print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
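
The duration computed above is rettime - calltime in nanoseconds, which the duration helpers render as microseconds with three decimal places (e.g. "1.234 us"). A simplified standalone version of that conversion; the kernel's trace_print_graph_duration() additionally handles field alignment and the overhead markers, so treat this as a sketch of the arithmetic only:

#include <stdio.h>

/* Nanoseconds to the "N.NNN us" form used in the duration column. */
static void print_duration_us(unsigned long long ns)
{
	printf("%llu.%03llu us", ns / 1000ULL, ns % 1000ULL);
}

int main(void)
{
	print_duration_us(1234);	/* prints "1.234 us" */
	putchar('\n');
	return 0;
}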
in print_graph_comment():
1080 struct trace_array *tr = iter->tr;
1081 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1082 struct fgraph_data *data = iter->private;
1089 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1104 switch (iter->ent->type) {
1121 event = ftrace_find_event(ent->type);
1125 ret = event->funcs->trace(iter, sym_flags, event);
1134 if (s->buffer[s->seq.len - 1] == '\n') {
1135 s->buffer[s->seq.len - 1] = '\0';
1136 s->seq.len--;
in print_graph_function_flags():
1149 struct fgraph_data *data = iter->private;
1150 struct trace_entry *entry = iter->ent;
1151 struct trace_seq *s = &iter->seq;
1152 int cpu = iter->cpu;
1155 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1156 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1164 if (data && data->failed) {
1165 field = &data->ent;
1166 iter->cpu = data->cpu;
1168 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1169 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1172 iter->cpu = cpu;
1176 switch (entry->type) {
1192 return print_graph_return(&field->ret, s, entry, iter, flags);
1196 /* dont trace stack and functions as comments */
in print_lat_header():
1235 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
1236 seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
1237 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1238 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
in __print_graph_headers_flags():
1245 int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

in print_graph_headers_flags():
1290 struct trace_iterator *iter = s->private;
1291 struct trace_array *tr = iter->tr;
1293 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1296 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
in graph_trace_open():
1309 /* pid and depth on the last trace processed */
1314 iter->private = NULL;
1323 data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1324 if (!data->cpu_data)
1328 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1329 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1330 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1331 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1333 *pid = -1;
1336 *depth_irq = -1;
1339 iter->private = data;
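
graph_trace_open() pairs an allocated fgraph_data with alloc_percpu_gfp() storage and seeds sentinel values: last_pid and depth_irq start at -1 so the first event on each CPU is treated as a pid change outside irq context. A reduced kernel-C sketch of that allocate-then-seed pattern, using hypothetical my_* types:

#include <linux/percpu.h>
#include <linux/slab.h>

struct my_cpu_data {
	pid_t	last_pid;
	int	depth_irq;
};

struct my_data {
	struct my_cpu_data __percpu *cpu_data;
};

static struct my_data *my_open(void)
{
	struct my_data *data;
	int cpu;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->cpu_data = alloc_percpu_gfp(struct my_cpu_data, GFP_KERNEL);
	if (!data->cpu_data) {
		kfree(data);
		return NULL;
	}

	/* Seed sentinels so the first event on each CPU is handled correctly. */
	for_each_possible_cpu(cpu) {
		per_cpu_ptr(data->cpu_data, cpu)->last_pid = -1;
		per_cpu_ptr(data->cpu_data, cpu)->depth_irq = -1;
	}
	return data;
}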
in graph_trace_close():
1351 struct fgraph_data *data = iter->private;
1354 free_percpu(data->cpu_data);
1375 .trace = print_graph_function_event,
in init_graph_trace():
1462 max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1465 pr_warn("Warning: could not register graph trace events\n");
1470 pr_warn("Warning: could not register graph trace events\n");
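
To exercise the print_graph_* formatting above end to end, select the tracer and stream its output. A minimal userspace reader, assuming tracefs is mounted at /sys/kernel/tracing and the program runs as root:

#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[512];

	/* Select the tracer this file registers. */
	f = fopen("/sys/kernel/tracing/current_tracer", "w");
	if (!f)
		return 1;
	fputs("function_graph", f);
	fclose(f);

	/* Stream the formatted call graph produced by the print_graph_* helpers. */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}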