cregit-Linux: how code gets into the kernel

Release 4.15 kernel/trace/ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"


#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */

#define FTRACE_HASH_BITS 7

#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

#define FTRACE_HASH_DEFAULT_BITS 10

#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE

#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),

#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else

#define INIT_OPS_HASH(opsname)

#define ASSIGN_OPS_HASH(opsname, val)
#endif


static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */

int ftrace_enabled __read_mostly;

static int last_ftrace_enabled;

/* Current function tracing op */

struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */

static struct ftrace_ops *set_function_trace_op;


static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            48      100.00%  2        100.00%
Total                     48      100.00%  2        100.00%

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)			\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
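For orientation, the two macros above bracket a loop over the registered ops. The fragment below is only a usage sketch, not part of the file: it mirrors the pattern used by ftrace_update_pid_func() further down, the loop body is a placeholder, and the caller is assumed to be in a context where walking ftrace_ops_list is safe.

	struct ftrace_ops *op;

	/* visit every registered ftrace_ops on the list */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID)
			;	/* act on this ops */
	} while_for_each_ftrace_op(op);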
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Masami Hiramatsu          44      80.00%   1        50.00%
Steven Rostedt            11      20.00%   1        50.00%
Total                     55      100.00%  2        100.00%

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = rcu_dereference_protected(ftrace_ops_list,
					     lockdep_is_held(&ftrace_lock));
	     ops != &ftrace_list_end;
	     ops = rcu_dereference_protected(ops->next,
					     lockdep_is_held(&ftrace_lock)))
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            52      74.29%   1        50.00%
Chunyan Zhang             18      25.71%   1        50.00%
Total                     70      100.00%  2        100.00%


static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            62      100.00%  6        100.00%
Total                     62      100.00%  6        100.00%

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            11      100.00%  2        100.00%
Total                     11      100.00%  2        100.00%


static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            12      100.00%  1        100.00%
Total                     12      100.00%  1        100.00%


static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            14      100.00%  1        100.00%
Total                     14      100.00%  1        100.00%

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            8       100.00%  1        100.00%
Total                     8       100.00%  1        100.00%

#endif
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            35      100.00%  2        100.00%
Total                     35      100.00%  2        100.00%


static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes affect immediately. Thus,
	 * we need to coorditate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            129     87.16%   12       92.31%
Chunyan Zhang             19      12.84%   1        7.69%
Total                     148     100.00%  13       100.00%


int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Jiaxing Wang              12      100.00%  1        100.00%
Total                     12      100.00%  1        100.00%


static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            31      83.78%   3        60.00%
Chunyan Zhang             5       13.51%   1        20.00%
Arnaldo Carvalho de Melo  1       2.70%    1        20.00%
Total                     37      100.00%  5        100.00%


static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Arnaldo Carvalho de Melo  80      65.57%   1        20.00%
Steven Rostedt            23      18.85%   3        60.00%
Chunyan Zhang             19      15.57%   1        20.00%
Total                     122     100.00%  5        100.00%

static void ftrace_update_trampoline(struct ftrace_ops *ops);
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            141     99.30%   8        88.89%
Masami Hiramatsu          1       0.70%    1        11.11%
Total                     142     100.00%  9        100.00%
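For context, callers outside this file do not use __register_ftrace_function() directly; they go through the public register_ftrace_function() wrapper declared in <linux/ftrace.h>, which ends up in this helper. The fragment below is only a minimal sketch of that usage; my_callback, my_ops and my_init are illustrative names, and filtering and error handling are omitted.

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* called on every traced function entry */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	static int __init my_init(void)
	{
		/* hooks my_callback into the ops list maintained below */
		return register_ftrace_function(&my_ops);
	}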


static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            61      88.41%   7        77.78%
Arnaldo Carvalho de Melo  4       5.80%    1        11.11%
Jiri Olsa                 4       5.80%    1        11.11%
Total                     69      100.00%  9        100.00%


static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            66      98.51%   4        80.00%
Motohiro Kosaki           1       1.49%    1        20.00%
Total                     67      100.00%  5        100.00%

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
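As a rough sanity check of the PROFILES_PER_PAGE arithmetic, here is a stand-alone userspace sketch that mirrors struct ftrace_profile and struct ftrace_profile_page. The hlist_node stand-in, the 4096-byte page size and the resulting 85 records per page are assumptions about an x86-64 build with the graph tracer enabled, not figures taken from this file.

	#include <stdio.h>
	#include <stddef.h>

	#define MY_PAGE_SIZE 4096UL			/* assumed page size */

	struct my_profile {				/* mirrors struct ftrace_profile */
		void *node_next, **node_pprev;		/* stand-in for struct hlist_node */
		unsigned long ip;
		unsigned long counter;
		unsigned long long time;		/* graph tracer fields */
		unsigned long long time_squared;
	};

	struct my_profile_page {			/* mirrors struct ftrace_profile_page */
		struct my_profile_page *next;
		unsigned long index;
		struct my_profile records[];
	};

	int main(void)
	{
		size_t records_size = MY_PAGE_SIZE -
			offsetof(struct my_profile_page, records);

		/* With the assumed layout this prints 4080 bytes -> 85 records. */
		printf("records area %zu bytes, %zu records per page\n",
		       records_size, records_size / sizeof(struct my_profile));
		return 0;
	}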
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            109     94.78%   2        66.67%
Li Zefan                  6       5.22%    1        33.33%
Total                     115     100.00%  3        100.00%


static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            56      100.00%  2        100.00%
Total                     56      100.00%  2        100.00%

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            59      100.00%  1        100.00%
Total                     59      100.00%  1        100.00%

#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            59      100.00%  2        100.00%
Total                     59      100.00%  2        100.00%

#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, " Function "
		    "Hit Time Avg s^2\n"
		    " -------- "
		    "--- ---- --- ---\n");
#else
	seq_puts(m, " Function Hit\n"
		    " -------- ---\n");
#endif
	return 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            35      89.74%   3        60.00%
Rasmus Villemoes          2       5.13%    1        20.00%
Chase Douglas             2       5.13%    1        20.00%
Total                     39      100.00%  5        100.00%


static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, " %-30.30s %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, " ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev,
		       rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, " ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, " ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            131     46.29%   4        44.44%
Chase Douglas             63      22.26%   1        11.11%
Li Zefan                  42      14.84%   1        11.11%
Umesh Tiwari              33      11.66%   1        11.11%
Juri Lelli                13      4.59%    1        11.11%
Rasmus Villemoes          1       0.35%    1        11.11%
Total                     283     100.00%  9        100.00%
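For reference, the standard-deviation block in function_stat_show() above computes the usual closed form of the sample variance, with n = rec->counter, the sum of samples = rec->time and the sum of squared samples = rec->time_squared:

	s^2 = \frac{1}{n-1} \sum_{i=1}^{n} (x_i - \bar{x})^2
	    = \frac{n \sum_{i=1}^{n} x_i^2 - \left(\sum_{i=1}^{n} x_i\right)^2}{n(n-1)}

In the code, the numerator is rec->counter * rec->time_squared - rec->time * rec->time and the do_div() supplies the n(n-1) denominator; the extra factor of 1000 is only the ns^2 to us^2 conversion mentioned in the comment.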


static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            72      100.00%  3        100.00%
Total                     72      100.00%  3        100.00%


int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            190     99.48%   4        80.00%
Namhyung Kim              1       0.52%    1        20.00%
Total                     191     100.00%  5        100.00%


static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            109     100.00%  4        100.00%
Total                     109     100.00%  4        100.00%


static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            36      97.30%   1        50.00%
Miao Xie                  1       2.70%    1        50.00%
Total                     37      100.00%  2        100.00%

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            83      98.81%   4        80.00%
Namhyung Kim              1       1.19%    1        20.00%
Total                     84      100.00%  5        100.00%


static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            46      97.87%   2        66.67%
Namhyung Kim              1       2.13%    1        33.33%
Total                     47      100.00%  3        100.00%

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);
	return rec;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Steven Rostedt            134     100.00%  4        100.00%
Total                     134     100.00%  4        100.00%


static void function_profile_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops,