Release 4.15 kernel/trace/trace_functions_graph.c
// SPDX-License-Identifier: GPL-2.0
/*
*
* Function graph tracer.
* Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
* Mostly borrowed from function tracer which
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include "trace.h"
#include "trace_output.h"
static bool kill_ftrace_graph;
/**
* ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
*
* ftrace_graph_stop() is called when a severe error is detected in
* the function graph tracing. This function is called by the critical
* paths of function graph to keep those paths from doing any more harm.
*/
bool ftrace_graph_is_dead(void)
{
return kill_ftrace_graph;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
/**
* ftrace_graph_stop - set to permanently disable function graph tracing
*
* In case of an error in function graph tracing, this is called
* to try to keep function graph tracing from causing any more harm.
* Usually the error is pretty severe, and this is called to try to at least
* get a warning out to the user.
*/
void ftrace_graph_stop(void)
{
kill_ftrace_graph = true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
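A hot path is expected to poll ftrace_graph_is_dead() and bail out once ftrace_graph_stop() has fired, exactly as ftrace_push_return_trace() does below. A minimal sketch of that pattern (the hook name and body are hypothetical):

/* Hypothetical sketch: a critical path consulting the kill switch. */
static void example_graph_hook(void)
{
	/* After ftrace_graph_stop(), do no further tracing work. */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	/* ... normal function graph bookkeeping ... */
}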
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;
struct fgraph_cpu_data {
pid_t last_pid;
int depth;
int depth_irq;
int ignore;
unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
};
struct fgraph_data {
struct fgraph_cpu_data __percpu *cpu_data;
/* Place to preserve last processed entry. */
struct ftrace_graph_ent_entry ent;
struct ftrace_graph_ret_entry ret;
int failed;
int cpu;
};
#define TRACE_GRAPH_INDENT 2
unsigned int fgraph_max_depth;
static struct tracer_opt trace_opts[] = {
/* Display overruns? (for self-debugging purposes) */
{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
/* Display CPU ? */
{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
/* Display Overhead ? */
{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
/* Display proc name/pid */
{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
/* Display duration of execution */
{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
/* Display absolute time of an entry */
{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
/* Display interrupts */
{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
/* Display function name after trailing } */
{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
/* Include sleep time (scheduled out) between entry and return */
{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
/* Include time within nested functions */
{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
/* Don't display overruns, proc, or tail by default */
.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
.opts = trace_opts
};
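Each option above is a single bit in tracer_flags.val, toggled at run time by writing e.g. funcgraph-proc or nofuncgraph-proc to the trace_options file in tracefs. Output code simply tests the bit in the flags word it is handed; a minimal sketch (the helper name is hypothetical):

/* Hypothetical sketch: option bits gate individual output columns. */
static void maybe_print_cpu_column(struct trace_seq *s, int cpu, u32 flags)
{
	if (flags & TRACE_GRAPH_PRINT_CPU)	/* funcgraph-cpu set? */
		print_graph_cpu(s, cpu);
}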
static struct trace_array *graph_array;
/*
* The DURATION column is also used to display IRQ markers;
* the following values are used by print_graph_irq and others
* to fill in space in the DURATION column.
*/
enum {
FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
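The fill values live above the regular option bits (shifted by TRACE_GRAPH_PRINT_FILL_SHIFT), so a printer extracts them with a mask and switches on the result. A sketch of the decoding, modeled on what print_graph_duration() does with these values and assuming the usual TRACE_GRAPH_PRINT_FILL_MASK companion macro:

/* Sketch: decode the fill request carried in the upper flag bits. */
static void fill_duration_column(struct trace_seq *s, u32 flags)
{
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:	/* blank the whole DURATION column */
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:	/* blank only the leading part */
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:	/* blank only the trailing part */
		trace_seq_puts(s, " |");
		return;
	}
}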
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
unsigned long frame_pointer, unsigned long *retp)
{
unsigned long long calltime;
int index;
if (unlikely(ftrace_graph_is_dead()))
return -EBUSY;
if (!current->ret_stack)
return -EBUSY;
/*
* We must make sure the ret_stack is tested before we read
* anything else.
*/
smp_rmb();
/* The return trace stack is full */
if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
atomic_inc(&current->trace_overrun);
return -EBUSY;
}
/*
* The curr_ret_stack is an index into the ftrace return stack of
* the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
* when the function graph tracer is used. To support filtering out
* specific functions, the index is made negative by subtracting a
* large value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
* negative index it ignores the record. The index is recovered when
* returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
* back, after which functions are recorded normally again.
*
* The curr_ret_stack is initialized to -1 and gets incremented in
* this function, so it can be less than -1 only if the function was
* filtered out via ftrace_graph_notrace_addr(), which the user can
* set through the set_graph_notrace file in tracefs.
*/
if (current->curr_ret_stack < -1)
return -EBUSY;
calltime = trace_clock_local();
index = ++current->curr_ret_stack;
if (ftrace_graph_notrace_addr(func))
current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
barrier();
current->ret_stack[index].ret = ret;
current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
current->ret_stack[index].retp = retp;
#endif
*depth = current->curr_ret_stack;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 148 | 72.91% | 6 | 66.67% |
Namhyung Kim | 29 | 14.29% | 1 | 11.11% |
Josh Poimboeuf | 26 | 12.81% | 2 | 22.22% |
Total | 203 | 100.00% | 9 | 100.00% |
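The push is driven from each architecture's mcount/fentry trampoline: prepare_ftrace_return() saves the real return address via ftrace_push_return_trace() and then rewrites the on-stack slot to point at return_to_handler. A simplified, arch-neutral sketch (real implementations differ in ordering and in how they guard the stack write):

/* Sketch: the arch entry hook pairing with ftrace_push_return_trace(). */
void prepare_ftrace_return_sketch(unsigned long *parent, unsigned long self_addr,
				  unsigned long frame_pointer)
{
	unsigned long old = *parent;	/* the real return address */
	struct ftrace_graph_ent trace;

	trace.func = self_addr;

	/* Push the real return address; bail if dead or the stack is full. */
	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer, parent))
		return;

	/* Let the entry callback veto this function; undo the push if so. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		return;
	}

	/* Divert the return path through the tracer's trampoline. */
	*parent = (unsigned long)&return_to_handler;
}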
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
unsigned long frame_pointer)
{
int index;
index = current->curr_ret_stack;
/*
* A negative index here means that it's just returned from a
* notrace'd function. Recover index to get an original
* return address. See ftrace_push_return_trace().
*
* TODO: Need to check whether the stack gets corrupted.
*/
if (index < 0)
index += FTRACE_NOTRACE_DEPTH;
if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
ftrace_graph_stop();
WARN_ON(1);
/* Might as well panic, otherwise we have nowhere to go */
*ret = (unsigned long)panic;
return;
}
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
/*
* The arch may choose to record the frame pointer used
* and check it here to make sure that it is what we expect it
* to be. If gcc does not set the placeholder of the return
* address in the frame pointer, and does a copy instead, then
* the function graph trace will fail. This test detects this
* case.
*
* Currently, x86_32 built to optimize for size (-Os) makes the
* latest gcc do the above.
*
* Note, -mfentry does not use frame pointers, and this test
* is not needed if CC_USING_FENTRY is set.
*/
if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
ftrace_graph_stop();
WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
" from func %ps return to %lx\n",
current->ret_stack[index].fp,
frame_pointer,
(void *)current->ret_stack[index].func,
current->ret_stack[index].ret);
*ret = (unsigned long)panic;
return;
}
#endif
*ret = current->ret_stack[index].ret;
trace->func = current->ret_stack[index].func;
trace->calltime = current->ret_stack[index].calltime;
trace->overrun = atomic_read(&current->trace_overrun);
trace->depth = index;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 190 | 91.35% | 5 | 71.43% |
Namhyung Kim | 15 | 7.21% | 1 | 14.29% |
Josh Poimboeuf | 3 | 1.44% | 1 | 14.29% |
Total | 208 | 100.00% | 7 | 100.00% |
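To make the negative-index trick concrete, assume for illustration that FTRACE_NOTRACE_DEPTH is 65536 and curr_ret_stack was 2 when a notrace-filtered function was entered:

/* Illustration only; the real constant is defined in trace.h. */
index = ++current->curr_ret_stack;		/* index = 3, entry stored at [3] */
current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;	/* 3 - 65536 = -65533: "ignore" */
/* ... nested functions see a negative index and are skipped ... */
index = current->curr_ret_stack + FTRACE_NOTRACE_DEPTH;	/* back to 3 on return */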
/*
* Send the trace to the ring-buffer.
* @return the original return address.
*/
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
struct ftrace_graph_ret trace;
unsigned long ret;
ftrace_pop_return_trace(&trace, &ret, frame_pointer);
trace.rettime = trace_clock_local();
barrier();
current->curr_ret_stack--;
/*
* The curr_ret_stack can be less than -1 only if the function was
* filtered out and is about to return. Recover the index and
* continue tracing functions normally.
*/
if (current->curr_ret_stack < -1) {
current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
return ret;
}
/*
* The trace should run after decrementing the ret counter
* in case an interrupt were to come in. We don't want to
* lose the interrupt if max_depth is set.
*/
ftrace_graph_return(&trace);
if (unlikely(!ret)) {
ftrace_graph_stop();
WARN_ON(1);
/* Might as well panic. What else to do? */
ret = (unsigned long)panic;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 80 | 78.43% | 4 | 66.67% |
Namhyung Kim | 21 | 20.59% | 1 | 16.67% |
Frédéric Weisbecker | 1 | 0.98% | 1 | 16.67% |
Total | 102 | 100.00% | 6 | 100.00% |
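On the return side, the architecture's return_to_handler trampoline (normally assembly) saves the function's return-value registers, calls ftrace_return_to_handler() to emit the return event and recover the saved address, and then jumps there. In C-like pseudocode, purely to illustrate the contract:

/* Pseudocode only: the real trampoline is arch assembly. */
	original_ret = ftrace_return_to_handler(frame_pointer);
	/* restore return-value registers, then... */
	jump_to(original_ret);	/* hypothetical: resume in the real caller */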
/**
* ftrace_graph_ret_addr - convert a potentially modified stack return address
* to its original value
*
* This function can be called by stack unwinding code to convert a found stack
* return address ('ret') to its original value, in case the function graph
* tracer has modified it to be 'return_to_handler'. If the address hasn't
* been modified, the unchanged value of 'ret' is returned.
*
* 'idx' is a state variable which should be initialized by the caller to zero
* before the first call.
*
* 'retp' is a pointer to the return address on the stack. It's ignored if
* the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
*/
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp)
{
int index = task->curr_ret_stack;
int i;
if (ret != (unsigned long)return_to_handler)
return ret;
if (index < -1)
index += FTRACE_NOTRACE_DEPTH;
if (index < 0)
return ret;
for (i = 0; i <= index; i++)
if (task->ret_stack[i].retp == retp)
return task->ret_stack[i].ret;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Josh Poimboeuf | 106 | 100.00% | 1 | 100.00% |
Total | 106 | 100.00% | 1 | 100.00% |
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp)
{
int task_idx;
if (ret != (unsigned long)return_to_handler)
return ret;
task_idx = task->curr_ret_stack;
if (!task->ret_stack || task_idx < *idx)
return ret;
task_idx -= *idx;
(*idx)++;
return task->ret_stack[task_idx].ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Josh Poimboeuf | 82 | 100.00% | 1 | 100.00% |
Total | 82 | 100.00% | 1 | 100.00% |
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
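Stack unwinders are the consumers of ftrace_graph_ret_addr(): they keep one idx cursor for the whole walk and pass every candidate return address through it, so frames hijacked to return_to_handler are reported with their original targets. A sketch (the walk loop itself is hypothetical):

/* Sketch: filtering unwound return addresses through the graph tracer. */
static void walk_stack_sketch(struct task_struct *task, unsigned long *sp,
			      unsigned long *end)
{
	int graph_idx = 0;	/* state cursor, zeroed once per walk */
	unsigned long addr;

	for (; sp < end; sp++) {
		addr = ftrace_graph_ret_addr(task, &graph_idx, *sp, sp);
		if (__kernel_text_address(addr))
			printk("  %pS\n", (void *)addr);
	}
}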
int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
{
struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
sizeof(*entry), flags, pc);
if (!event)
return 0;
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit_nostack(buffer, event);
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 99 | 85.34% | 1 | 16.67% |
Steven Rostedt | 14 | 12.07% | 4 | 66.67% |
Tom Zanussi | 3 | 2.59% | 1 | 16.67% |
Total | 116 | 100.00% | 6 | 100.00% |
static inline int ftrace_graph_ignore_irqs(void)
{
if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
return 0;
return in_irq();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 26 | 100.00% | 2 | 100.00% |
Total | 26 | 100.00% | 2 | 100.00% |
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
struct trace_array *tr = graph_array;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int ret;
int cpu;
int pc;
if (!ftrace_trace_task(tr))
return 0;
if (ftrace_graph_ignore_func(trace))
return 0;
if (ftrace_graph_ignore_irqs())
return 0;
/*
* Do not trace a function if it's filtered by set_graph_notrace.
* Make the index of ret stack negative to indicate that it should
* ignore further functions. But it needs its own ret stack entry
* to recover the original index in order to continue tracing after
* returning from the function.
*/
if (ftrace_graph_notrace_addr(trace->func))
return 1;
/*
* Stop here if tracing_thresh is set. We only write function return
* events to the ring buffer.
*/
if (tracing_thresh)
return 1;
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
ret = __trace_graph_entry(tr, trace, flags, pc);
} else {
ret = 0;
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 136 | 78.61% | 1 | 12.50% |
Steven Rostedt | 16 | 9.25% | 5 | 62.50% |
Namhyung Kim | 13 | 7.51% | 1 | 12.50% |
Joel A Fernandes | 8 | 4.62% | 1 | 12.50% |
Total | 173 | 100.00% | 8 | 100.00% |
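The atomic_inc_return(&data->disabled) == 1 test above is a per-CPU reentrancy guard: only the outermost caller on a CPU writes the event, and anything nested (for instance an interrupt arriving mid-record) sees a count greater than one and backs off. The same pattern appears again in trace_graph_return() below; in isolation:

disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))	/* outermost on this CPU: safe to record */
	ret = __trace_graph_entry(tr, trace, flags, pc);
else
	ret = 0;		/* nested: someone above us is mid-record */
atomic_dec(&data->disabled);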
static void
__trace_graph_function(struct trace_array *tr,
unsigned long ip, unsigned long flags, int pc)
{
u64 time = trace_clock_local();
struct ftrace_graph_ent ent = {
.func = ip,
.depth = 0,
};
struct ftrace_graph_ret ret = {
.func = ip,
.depth = 0,
.calltime = time,
.rettime = time,
};
__trace_graph_entry(tr, &ent, flags, pc);
__trace_graph_return(tr, &ret, flags, pc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 94 | 100.00% | 1 | 100.00% |
Total | 94 | 100.00% | 1 | 100.00% |
void
trace_graph_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
unsigned long flags, int pc)
{
__trace_graph_function(tr, ip, flags, pc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
unsigned long flags,
int pc)
{
struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->ret = *trace;
if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit_nostack(buffer, event);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 94 | 84.68% | 1 | 16.67% |
Steven Rostedt | 14 | 12.61% | 4 | 66.67% |
Tom Zanussi | 3 | 2.70% | 1 | 16.67% |
Total | 111 | 100.00% | 6 | 100.00% |
void trace_graph_return(struct ftrace_graph_ret *trace)
{
struct trace_array *tr = graph_array;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
__trace_graph_return(tr, trace, flags, pc);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 102 | 94.44% | 1 | 33.33% |
Steven Rostedt | 6 | 5.56% | 2 | 66.67% |
Total | 108 | 100.00% | 3 | 100.00% |
void set_graph_array(struct trace_array *tr)
{
graph_array = tr;
/* Make graph_array visible before we start tracing */
smp_mb();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
if (tracing_thresh &&
(trace->rettime - trace->calltime < tracing_thresh))
return;
else
trace_graph_return(trace);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tim Bird | 33 | 97.06% | 1 | 50.00% |
Steven Rostedt | 1 | 2.94% | 1 | 50.00% |
Total | 34 | 100.00% | 2 | 100.00% |
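tracing_thresh is set from user space through the tracing_thresh file in tracefs, in microseconds. For example, writing 100 to it while this tracer is active reports only functions that ran longer than 100 microseconds: graph_trace_init() below registers trace_graph_thresh_return() as the return handler, and trace_graph_entry() returns early (as seen above) so no entry events are logged.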
static int graph_trace_init(struct trace_array *tr)
{
int ret;
set_graph_array(tr);
if (tracing_thresh)
ret = register_ftrace_graph(&trace_graph_thresh_return,
&trace_graph_entry);
else
ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry);
if (ret)
return ret;
tracing_start_cmdline_record();
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 30 | 50.85% | 4 | 57.14% |
Tim Bird | 15 | 25.42% | 1 | 14.29% |
Steven Rostedt | 13 | 22.03% | 1 | 14.29% |
Joel A Fernandes | 1 | 1.69% | 1 | 14.29% |
Total | 59 | 100.00% | 7 | 100.00% |
static void graph_trace_reset(struct trace_array *tr)
{
tracing_stop_cmdline_record();
unregister_ftrace_graph();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 14 | 82.35% | 1 | 50.00% |
Steven Rostedt | 3 | 17.65% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
static int graph_trace_update_thresh(struct trace_array *tr)
{
graph_trace_reset(tr);
return graph_trace_init(tr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stanislav Fomichev | 21 | 95.45% | 1 | 50.00% |
Steven Rostedt | 1 | 4.55% | 1 | 50.00% |
Total | 22 | 100.00% | 2 | 100.00% |
static int max_bytes_for_cpu;
static void print_graph_cpu(struct trace_seq *s, int cpu)
{
/*
* Start with a space character - to make it stand out
* to the right a bit when trace output is pasted into
* email:
*/
trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 17 | 65.38% | 1 | 25.00% |
Ingo Molnar | 5 | 19.23% | 1 | 25.00% |
Steven Rostedt | 2 | 7.69% | 1 | 25.00% |
Lai Jiangshan | 2 | 7.69% | 1 | 25.00% |
Total | 26 | 100.00% | 4 | 100.00% |
#define TRACE_GRAPH_PROCINFO_LENGTH 14
static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
char comm[TASK_COMM_LEN];
/* sign + log10(MAX_INT) + '\0' */
char pid_str[11];
int spaces = 0;
int len;
int i;
trace_find_cmdline(pid, comm);
comm[7] = '\0';
sprintf(pid_str, "%d", pid);
/* 1 stands for the "-" character */
len = strlen(comm) + strlen(pid_str) + 1;
if (len < TRACE_GRAPH_PROCINFO_LENGTH)
spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
/* First spaces to align center */
for (i = 0; i < spaces / 2; i++)
trace_seq_putc(s, ' ');
trace_seq_printf(s, "%s-%s", comm, pid_str);
/* Last spaces to align center */
for (i = 0; i < spaces - (spaces / 2); i++)
trace_seq_putc(s, ' ');
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 130 | 87.25% | 1 | 25.00% |
Steven Rostedt | 15 | 10.07% | 2 | 50.00% |
Jovi Zhangwei | 4 | 2.68% | 1 | 25.00% |
Total | 149 | 100.00% | 4 | 100.00% |
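Worked example: for comm "bash" and pid 1755 (the comm[7] truncation leaves "bash" intact), the payload "bash-1755" is 9 characters, so spaces = 14 - 9 = 5, emitted as 2 leading and 3 trailing blanks: "  bash-1755   ".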
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
trace_seq_putc(s, ' ');
trace_print_lat_fmt(s, entry);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 30 | 100.00% | 4 | 100.00% |
Total | 30 | 100.00% | 4 | 100.00% |
/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
pid_t prev_pid;
pid_t *last_pid;
if (!data)
return;
last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
if (*last_pid == pid)
return;
prev_pid = *last_pid;
*last_pid = pid;
if (prev_pid == -1)
return;
/*
* Context-switch trace line:
------------------------------------------
| 1) migration/0--1 => sshd-1755
------------------------------------------
*/
trace_seq_puts(s, " ------------------------------------------\n");
print_graph_cpu(s, cpu);
print_graph_proc(s, prev_pid);
trace_seq_puts(s, " => ");
print_graph_proc(s, pid);
trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 76 | 63.33% | 6 | 50.00% |
Ingo Molnar | 21 | 17.50% | 1 | 8.33% |
Steven Rostedt | 18 | 15.00% | 3 | 25.00% |
Jovi Zhangwei | 3 | 2.50% | 1 | 8.33% |
Jiri Olsa | 2 | 1.67% | 1 | 8.33% |
Total | 120 | 100.00% | 12 | 100.00% |
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *curr)
{
struct fgraph_data *data = iter->private;
struct ring_buffer_iter *ring_iter = NULL;
struct ring_buffer_event *event;
struct ftrace_graph_ret_entry *next;
/*
* If the previous output failed to write to the seq buffer,
* then we just reuse the data from before.
*/
if (data && data->failed) {
curr = &data->ent;
next = &data->ret;
} else {
ring_iter = trace_buffer_iter(iter, iter->cpu);
/* First peek to compare current entry and the next one */
if (ring_iter)
event = ring_buffer_iter_peek(ring_iter, NULL);
else {
/*
* We need to consume the current entry to see
* the next one.
*/
ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
NULL, NULL);
event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
NULL, NULL);
}
if (!event)
return NULL;
next = ring_buffer_event_data(event);
if (data) {
/*
* Save current and next entries for later reference
* if the output fails.
*/
data->ent = *curr;
/*
* If the next event is not a return type, we only need to
* record its type. Otherwise we can safely copy the
* entire event.
*/
if (next->ent.type == TRACE_GRAPH_RET)
data->ret = *next;
else
data->ret.ent.type = next->ent.type;
}
}
if (next->ent.type != TRACE_GRAPH_RET)
return NULL;
if (curr->ent.pid != next->ent.pid ||
curr->graph_ent.func != next->ret.func)
return NULL;
/* this is a leaf, now advance the iterator */
if (ring_iter)
ring_buffer_read(ring_iter, NULL);
return next;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 159 | 62.11% | 3 | 37.50% |
Jiri Olsa | 61 | 23.83% | 1 | 12.50% |
David Shaohua Li | 26 | 10.16% | 1 | 12.50% |
Steven Rostedt | 10 | 3.91% | 3 | 37.50% |
Total | 256 | 100.00% | 8 | 100.00% |
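A "leaf" is an entry event immediately followed by its own return event (same pid and func) with nothing between them; when get_return_for_leaf() finds one, the printer can collapse the pair into a single line carrying the duration. Illustratively (function name and timing invented), instead of:

 1)               |  do_fault() {
 1)   0.804 us    |  }

the leaf form prints:

 1)   0.804 us    |  do_fault();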
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
unsigned long usecs_rem;
usecs_rem = do_div(t, NSEC_PER_SEC);
usecs_rem /= 1000;
trace_seq_printf(s, "%5lu.%06lu | ",
(unsigned long)t, usecs_rem);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 44 | 95.65% | 1 | 50.00% |
Steven Rostedt | 2 | 4.35% | 1 | 50.00% |
Total | 46 | 100.00% | 2 | 100.00% |
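Worked example: for t = 5123456789 ns, do_div(t, NSEC_PER_SEC) leaves t = 5 with usecs_rem = 123456789; dividing by 1000 gives 123456, so the column reads "    5.123456 | ".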
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
enum trace_type type, int cpu, pid_t pid, u32 flags)
{
struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *ent = iter->ent;
if (addr < (unsigned long)__irqentry_text_start ||
addr >= (unsigned long)__irqentry_text_end)
return;
if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
/* Absolute time */
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
print_graph_abs_time(iter->ts, s);
/* Cpu */
if (flags & TRACE_GRAPH_PRINT_CPU)
print_graph_cpu(s, cpu);
/* Proc */
if (flags & TRACE_GRAPH_PRINT_PROC) {
print_graph_proc(s,