cregit-Linux: how code gets into the kernel

Release 4.15: kernel/trace/trace.c (directory: kernel/trace)
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */

bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */

static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */

bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */

struct trace_iterator *tracepoint_print_iter;

int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */

static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};


static int dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Frédéric Weisbecker              18   78.26%         1       50.00%
Steven Rostedt                    5   21.74%         1       50.00%
Total                            23  100.00%         2      100.00%

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
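
The comment above describes the trace_eval_maps layout: each saved array is a head item, then the maps themselves, then a tail item chaining to the next saved array. A minimal user-space sketch of walking that layout; all types and names below (eval_map, eval_head, eval_tail, walk) are simplified stand-ins for this sketch, not the kernel's:

#include <stdio.h>

struct eval_map  { const char *name; long value; };
struct eval_head { void *mod; unsigned long length; };

union eval_item;
struct eval_tail { union eval_item *next; const char *end; };

union eval_item {
	struct eval_map  map;
	struct eval_head head;
	struct eval_tail tail;
};

static void walk(union eval_item *item)
{
	while (item) {
		unsigned long i, len = item->head.length;

		for (i = 1; i <= len; i++)	/* maps follow the head */
			printf("%s = %ld\n", item[i].map.name, item[i].map.value);

		item = item[len + 1].tail.next;	/* tail chains the arrays */
	}
}

int main(void)
{
	static union eval_item arr2[3] = {
		{ .head = { 0, 1 } },
		{ .map  = { "BAR", 2 } },
		{ .tail = { 0, 0 } },
	};
	static union eval_item arr1[3] = {
		{ .head = { 0, 1 } },
		{ .map  = { "FOO", 1 } },
		{ .tail = { arr2, 0 } },
	};

	walk(arr1);	/* prints FOO = 1, then BAR = 2 */
	return 0;
}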
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Peter Zijlstra                   17   53.12%         1       14.29%
Steven Rostedt                   12   37.50%         3       42.86%
Frédéric Weisbecker               1    3.12%         1       14.29%
Li Zefan                          1    3.12%         1       14.29%
Chen Gang S                       1    3.12%         1       14.29%
Total                            32  100.00%         7      100.00%

__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Frédéric Weisbecker              36   66.67%         1       50.00%
Steven Rostedt                   18   33.33%         1       50.00%
Total                            54  100.00%         2      100.00%

__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Luis Claudio R. Goncalves        22   55.00%         1       50.00%
Steven Rostedt                   18   45.00%         1       50.00%
Total                            40  100.00%         2      100.00%

__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   23  100.00%         2      100.00%
Total                            23  100.00%         2      100.00%

__setup("alloc_snapshot", boot_alloc_snapshot); static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   22   95.65%         1       50.00%
Chen Gang S                       1    4.35%         1       50.00%
Total                            23  100.00%         2      100.00%

__setup("trace_options=", set_trace_boot_options); static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; static char *trace_boot_clock __initdata;
static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   27  100.00%         1      100.00%
Total                            27  100.00%         1      100.00%

__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   40  100.00%         1      100.00%
Total                            40  100.00%         1      100.00%

__setup("tp_printk", set_tracepoint_printk);
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   20   83.33%         1       33.33%
Lai Jiangshan                     3   12.50%         1       33.33%
Thomas Gleixner                   1    4.17%         1       33.33%
Total                            24  100.00%         3      100.00%

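ns2usecs() above rounds half-up: adding 500 before dividing by 1000 converts nanoseconds to the nearest microsecond. The kernel uses do_div() because 64-bit division needs a helper on some 32-bit targets. A self-contained sketch of the arithmetic, with plain division standing in for do_div():

#include <stdio.h>

static unsigned long long ns2usecs_demo(unsigned long long nsec)
{
	nsec += 500;		/* round half-up */
	return nsec / 1000;	/* do_div(nsec, 1000) in the kernel */
}

int main(void)
{
	printf("%llu\n", ns2usecs_demo(1499));	/* 1 */
	printf("%llu\n", ns2usecs_demo(1500));	/* 2 */
	return 0;
}
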
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   64  100.00%         1      100.00%
Total                            64  100.00%         1      100.00%


static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   24  100.00%         1      100.00%
Total                            24  100.00%         1      100.00%


void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   27  100.00%         1      100.00%
Total                            27  100.00%         1      100.00%


int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Tom Zanussi                      58   96.67%         2       50.00%
Steven Rostedt                    2    3.33%         2       50.00%
Total                            60  100.00%         4      100.00%


void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   22  100.00%         1      100.00%
Total                            22  100.00%         1      100.00%

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   35  100.00%         1      100.00%
Total                            35  100.00%         1      100.00%

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   35  100.00%         1      100.00%
Total                            35  100.00%         1      100.00%

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   86  100.00%         1      100.00%
Total                            86  100.00%         1      100.00%

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   74  100.00%         1      100.00%
Total                            74  100.00%         1      100.00%

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   89  100.00%         1      100.00%
Total                            89  100.00%         1      100.00%

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   38  100.00%         1      100.00%
Total                            38  100.00%         1      100.00%

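The seq_file iterators above encode each pid as pid + 1 so that pid 0 stays displayable while a NULL return still terminates the iteration. A user-space sketch of the trick; the flat array and pid_next() below are stand-ins for the kernel's bitmap and find_next_bit():

#include <stdio.h>

#define PID_MAX 32

static int pids[PID_MAX];	/* stand-in for the pid bitmap */

static void *pid_next(void *v)
{
	unsigned long pid = (unsigned long)v;	/* already +1 of the last pid */

	for (; pid < PID_MAX; pid++)
		if (pids[pid])
			return (void *)(pid + 1);	/* encode pid as pid + 1 */

	return NULL;		/* ends the iteration */
}

int main(void)
{
	void *v;

	pids[0] = pids[7] = 1;
	for (v = pid_next(NULL); v; v = pid_next(v))
		printf("%lu\n", (unsigned long)v - 1);	/* prints 0, then 7 */
	return 0;
}
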
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                  360   98.63%         1       50.00%
Wei Yongjun                       5    1.37%         1       50.00%
Total                           365  100.00%         2      100.00%

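trace_pid_write() above sizes the pid bitmap with (pid_max + 7) >> 3, i.e. the pid count rounded up to whole bytes, one bit per pid. A tiny demonstration; the values are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int pid_max;

	for (pid_max = 1; pid_max <= 32768; pid_max <<= 5)
		printf("pid_max=%5u -> %4u bytes\n",
		       pid_max, (pid_max + 7) >> 3);
	return 0;
}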

static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   31   56.36%         2       28.57%
Alexander Z Lam                  12   21.82%         1       14.29%
Ingo Molnar                      10   18.18%         2       28.57%
Fabian Frederick                  1    1.82%         1       14.29%
Thomas Gleixner                   1    1.82%         1       14.29%
Total                            55  100.00%         7      100.00%


u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Alexander Z Lam                  18   94.74%         1       50.00%
Thomas Gleixner                   1    5.26%         1       50.00%
Total                            19  100.00%         2      100.00%

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   17  100.00%         3      100.00%
Total                            17  100.00%         3      100.00%

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Lai Jiangshan                    46   95.83%         1       50.00%
Steven Rostedt                    2    4.17%         1       50.00%
Total                            48  100.00%         2      100.00%


static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Lai Jiangshan                    43   97.73%         1       50.00%
Steven Rostedt                    1    2.27%         1       50.00%
Total                            44  100.00%         2      100.00%


static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Lai Jiangshan                    27  100.00%         1      100.00%
Total                            27  100.00%         1      100.00%

#else

static DEFINE_MUTEX(access_lock);
static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Lai Jiangshan                    21  100.00%         1      100.00%
Total                            21  100.00%         1      100.00%


static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Lai Jiangshan                    21  100.00%         1      100.00%
Total                            21  100.00%         1      100.00%


static inline void trace_access_lock_init(void)
{
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Lai Jiangshan                     8  100.00%         1      100.00%
Total                             8  100.00%         1      100.00%

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   26  100.00%         1      100.00%
Total                            26  100.00%         1      100.00%


static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   31  100.00%         3      100.00%
Total                            31  100.00%         3      100.00%

#endif
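
Returning to trace_access_lock()/trace_access_unlock() above: per-cpu access takes the rwsem shared plus that cpu's mutex, while whole-buffer access takes the rwsem exclusive. A user-space pthreads sketch of the same scheme, assuming a fixed NR_CPUS; all names are local to the sketch (compile with -pthread):

#include <pthread.h>

#define NR_CPUS			4
#define RING_BUFFER_ALL_CPUS	-1	/* mirrors the kernel's value */

static pthread_rwlock_t all_cpu_access_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t cpu_access_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* exclusive: blocks every per-cpu accessor */
		pthread_rwlock_wrlock(&all_cpu_access_lock);
	} else {
		/* shared: many cpus in parallel, one accessor per cpu */
		pthread_rwlock_rdlock(&all_cpu_access_lock);
		pthread_mutex_lock(&cpu_access_lock[cpu]);
	}
}

static void access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		pthread_rwlock_unlock(&all_cpu_access_lock);
	} else {
		pthread_mutex_unlock(&cpu_access_lock[cpu]);
		pthread_rwlock_unlock(&all_cpu_access_lock);
	}
}

int main(void)
{
	access_lock(1);				/* consume cpu 1's buffer */
	access_unlock(1);
	access_lock(RING_BUFFER_ALL_CPUS);	/* touch all buffers */
	access_unlock(RING_BUFFER_ALL_CPUS);
	return 0;
}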
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   47  100.00%         2      100.00%
Total                            47  100.00%         2      100.00%

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   13  100.00%         2      100.00%
Total                            13  100.00%         2      100.00%

EXPORT_SYMBOL_GPL(tracing_on);
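
tracer_tracing_on() above pairs a plain store to buffer_disabled with smp_wmb(), and tracing_is_enabled() pairs its load with smp_rmb(); readers tolerate a small racy window in exchange for speed. A user-space approximation using C11 acquire/release atomics (a stronger ordering than the kernel's barriers, used here only to keep the sketch self-contained; all names are local to the sketch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int buffer_disabled = 1;	/* mirrors tr->buffer_disabled */

static void tracing_on_demo(void)
{
	/* plain store + write barrier in the kernel; release store here */
	atomic_store_explicit(&buffer_disabled, 0, memory_order_release);
}

static int tracing_is_enabled_demo(void)
{
	/* read barrier + plain load in the kernel; acquire load here */
	return !atomic_load_explicit(&buffer_disabled, memory_order_acquire);
}

int main(void)
{
	printf("%d\n", tracing_is_enabled_demo());	/* 0: still disabled */
	tracing_on_demo();
	printf("%d\n", tracing_is_enabled_demo());	/* 1: enabled */
	return 0;
}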
static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                   70   98.59%         1       50.00%
Joel Fernandes                    1    1.41%         1       50.00%
Total                            71  100.00%         2      100.00%

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}

Contributors

Person                       Tokens     Prop   Commits   CommitProp
Steven Rostedt                  186   84.93%         9       75.00%
Jovi Zhangwei                    32   14.61%         2       16.67%
Li Zefan                          1    0.46%         1        8.33%
Total                           219  100.00%        12      100.00%

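__trace_puts() above reserves size + 2 extra bytes so that a missing trailing newline can be appended before the terminating '\0'. A self-contained sketch of just that fixup; fixup() is local to this sketch:

#include <stdio.h>

static void fixup(char *buf, int size)	/* buf has size + 2 bytes of room */
{
	if (buf[size - 1] != '\n') {
		buf[size] = '\n';
		buf[size + 1] = '\0';
	} else {
		buf[size] = '\0';
	}
}

int main(void)
{
	char a[8] = "hi";	/* no trailing newline */
	char b[8] = "hi\n";	/* already newline-terminated */

	fixup(a, 2);
	fixup(b, 3);
	printf("%s%s", a, b);	/* prints "hi" twice, each on its own line */
	return 0;
}
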
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(