diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 272464bb7c60249fe93e84a4b81a65c85fe654ce..2b74f96d09d531aef703ff40af345ea48fabe476 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -810,6 +810,12 @@ Here is the list of current tracers that may be configured.
         to draw a graph of function calls similar to C code source.
 
+        Note that the function graph tracer calculates the start and return
+        timings internally, and does so separately for each instance. If two
+        instances run the function graph tracer and trace the same functions,
+        the reported durations may differ slightly, since each instance reads
+        the timestamps on its own rather than at the same time.
+
   "blk"
 
         The block tracer. The tracer used by the blktrace user
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8aebcb01e62cf5e97f98fbeddd862bcf2d9cb9b..b6e40e8791fa76c8f9867a6d2f3ed914d28e3f05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4122,6 +4122,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
                    preempt_model_none()      ? "server" :
                    preempt_model_voluntary() ? "desktop" :
                    preempt_model_full()      ? "preempt" :
+                   preempt_model_lazy()      ? "lazy"    :
                    preempt_model_rt()        ? "preempt_rt" :
                    "unknown",
                    /* These are reserved for later use */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index fce064e205706f50e3b64f345b53c0f23bb226ee..a4e799c1e7670a36d95f83c224db61084bd608e0 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -182,6 +182,7 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
         struct trace_array_cpu *data;
         unsigned long flags;
         unsigned int trace_ctx;
+        u64 *calltime;
         int ret;
 
         if (ftrace_graph_ignore_func(gops, trace))
@@ -199,6 +200,12 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
         if (!func_prolog_dec(tr, &data, &flags))
                 return 0;
 
+        calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+        if (!calltime)
+                return 0;
+
+        *calltime = trace_clock_local();
+
         trace_ctx = tracing_gen_ctx_flags(flags);
         ret = __trace_graph_entry(tr, trace, trace_ctx);
         atomic_dec(&data->disabled);
@@ -213,12 +220,19 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
         struct trace_array_cpu *data;
         unsigned long flags;
         unsigned int trace_ctx;
+        u64 *calltime;
+        int size;
 
         ftrace_graph_addr_finish(gops, trace);
 
         if (!func_prolog_dec(tr, &data, &flags))
                 return;
 
+        calltime = fgraph_retrieve_data(gops->idx, &size);
+        if (!calltime)
+                return;
+        trace->calltime = *calltime;
+
         trace_ctx = tracing_gen_ctx_flags(flags);
         __trace_graph_return(tr, trace, trace_ctx);
         atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index d6c7f18daa15abbeb3cc368946aba76b000e8df8..c58292e424d5601d978cfa816d770b5204448ee4 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -118,6 +118,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
         struct trace_array *tr = wakeup_trace;
         struct trace_array_cpu *data;
         unsigned int trace_ctx;
+        u64 *calltime;
         int ret = 0;
 
         if (ftrace_graph_ignore_func(gops, trace))
@@ -135,6 +136,12 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
         if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
                 return 0;
 
+        calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+        if (!calltime)
+                return 0;
+
+        *calltime = trace_clock_local();
+
         ret = __trace_graph_entry(tr, trace, trace_ctx);
         atomic_dec(&data->disabled);
         preempt_enable_notrace();
@@ -148,12 +155,19 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
         struct trace_array *tr = wakeup_trace;
         struct trace_array_cpu *data;
         unsigned int trace_ctx;
+        u64 *calltime;
+        int size;
 
         ftrace_graph_addr_finish(gops, trace);
 
         if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
                 return;
 
+        calltime = fgraph_retrieve_data(gops->idx, &size);
+        if (!calltime)
+                return;
+        trace->calltime = *calltime;
+
         __trace_graph_return(tr, trace, trace_ctx);
         atomic_dec(&data->disabled);
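
For reference, below is a minimal sketch (not part of the patch) of the pattern the irqsoff and wakeup tracers now share: each fgraph_ops instance reserves per-entry storage with fgraph_reserve_data(), stamps it with its own trace_clock_local() reading, and reads it back with fgraph_retrieve_data() in the return handler. The example_graph_entry()/example_graph_return() names are hypothetical; only the fgraph_*_data() and trace_clock_local() calls are taken from the patch.

/*
 * Hypothetical sketch of the per-instance calltime pattern.  Each
 * fgraph_ops instance stamps its own timestamp on function entry and
 * computes the duration itself on return, instead of relying on a
 * calltime shared across instances.
 */
static int example_graph_entry(struct ftrace_graph_ent *trace,
                               struct fgraph_ops *gops)
{
        u64 *calltime;

        /* Reserve instance-private storage tied to this function entry. */
        calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
        if (!calltime)
                return 0;       /* no room: skip tracing this entry */

        /* Each instance takes its own timestamp ... */
        *calltime = trace_clock_local();
        return 1;
}

static void example_graph_return(struct ftrace_graph_ret *trace,
                                 struct fgraph_ops *gops)
{
        u64 *calltime;
        u64 duration;
        int size;

        /* ... and retrieves it on return to compute the duration. */
        calltime = fgraph_retrieve_data(gops->idx, &size);
        if (!calltime)
                return;

        duration = trace_clock_local() - *calltime;
        /* record "duration" however the tracer wants */
        (void)duration;
}

Because each instance reads the clock independently, two instances tracing the same function can report slightly different durations, which is exactly what the ftrace.rst note above documents.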