Release 4.15 kernel/trace/trace_events.c
/*
* event tracer
*
* Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
* - Added format output of fields of the trace point.
* This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
*
*/
#define pr_fmt(fmt) fmt
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <trace/events/sched.h>
#include <asm/setup.h>
#include "trace_output.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"
DEFINE_MUTEX(event_mutex);
LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
static inline int system_refcount(struct event_subsystem *system)
{
return system->ref_count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static int system_refcount_inc(struct event_subsystem *system)
{
return system->ref_count++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static int system_refcount_dec(struct event_subsystem *system)
{
return --system->ref_count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file) \
list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
list_for_each_entry(file, &tr->events, list)
#define do_for_each_event_file_safe(tr, file) \
list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
struct trace_event_file *___n; \
list_for_each_entry_safe(file, ___n, &tr->events, list)
#define while_for_each_event_file() \
}
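These iterators walk every trace_event_file of every trace instance; because the outer list_for_each_entry() leaves its brace open until while_for_each_event_file(), a break only exits the inner loop and early exits need a goto. A minimal usage sketch, where walk_all_event_files() is an illustrative name rather than a function from this file:

/*
 * Illustrative sketch only: iterate every event file of every instance.
 * Leaving early must jump past while_for_each_event_file() with a goto.
 */
static void walk_all_event_files(void)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			pr_debug("%s is enabled\n",
				 trace_event_name(file->event_call));
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}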
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
if (!event_call->class->get_fields)
return &event_call->class->fields;
return event_call->class->get_fields(event_call);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 39 | 97.50% | 2 | 66.67% |
Jovi Zhangwei | 1 | 2.50% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
struct ftrace_event_field *field;
list_for_each_entry(field, head, link) {
if (!strcmp(field->name, name))
return field;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jovi Zhangwei | 50 | 100.00% | 1 | 100.00% |
Total | 50 | 100.00% | 1 | 100.00% |
struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
struct ftrace_event_field *field;
struct list_head *head;
head = trace_get_fields(call);
field = __find_event_field(head, name);
if (field)
return field;
field = __find_event_field(&ftrace_generic_fields, name);
if (field)
return field;
return __find_event_field(&ftrace_common_fields, name);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jovi Zhangwei | 47 | 62.67% | 1 | 25.00% |
Daniel Wagner | 16 | 21.33% | 1 | 25.00% |
Steven Rostedt | 12 | 16.00% | 2 | 50.00% |
Total | 75 | 100.00% | 4 | 100.00% |
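The lookup order is: the event's own fields first, then the generic pseudo-fields (CPU/COMM), then the common_* header fields shared by every event. A brief hedged sketch of a caller, assuming it already holds a valid trace_event_call pointer named call:

/* Illustrative only: resolve the common PID field for an arbitrary event. */
struct ftrace_event_field *field;

field = trace_find_event_field(call, "common_pid");
if (field)
	pr_debug("common_pid: offset %d, size %d, signed %d\n",
		 field->offset, field->size, field->is_signed);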
static int __trace_define_field(struct list_head *head, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type)
{
struct ftrace_event_field *field;
field = kmem_cache_alloc(field_cachep, GFP_TRACE);
if (!field)
return -ENOMEM;
field->name = name;
field->type = type;
if (filter_type == FILTER_OTHER)
field->filter_type = filter_assign_type(type);
else
field->filter_type = filter_type;
field->offset = offset;
field->size = size;
field->is_signed = is_signed;
list_add(&field->link, head);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 84 | 69.42% | 2 | 22.22% |
Li Zefan | 27 | 22.31% | 3 | 33.33% |
Namhyung Kim | 4 | 3.31% | 1 | 11.11% |
Steven Rostedt | 4 | 3.31% | 2 | 22.22% |
Frédéric Weisbecker | 2 | 1.65% | 1 | 11.11% |
Total | 121 | 100.00% | 9 | 100.00% |
int trace_define_field(struct trace_event_call *call, const char *type,
const char *name, int offset, int size, int is_signed,
int filter_type)
{
struct list_head *head;
if (WARN_ON(!call->class))
return 0;
head = trace_get_fields(call);
return __trace_define_field(head, type, name, offset, size,
is_signed, filter_type);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 74 | 98.67% | 1 | 50.00% |
Steven Rostedt | 1 | 1.33% | 1 | 50.00% |
Total | 75 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(trace_define_field);
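trace_define_field() is the exported building block that event classes with their own field layouts call from a define_fields-style hook. A minimal sketch, assuming a hypothetical entry layout; struct my_entry and my_event_define_fields() below are illustrative, not part of this file:

/* Illustrative sketch of a define_fields-style callback. */
struct my_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			ret;
};

static int my_event_define_fields(struct trace_event_call *call)
{
	int ret;

	ret = trace_define_field(call, "unsigned long", "ip",
				 offsetof(struct my_entry, ip),
				 sizeof(unsigned long),
				 is_signed_type(unsigned long), FILTER_OTHER);
	if (ret)
		return ret;

	return trace_define_field(call, "int", "ret",
				  offsetof(struct my_entry, ret),
				  sizeof(int), is_signed_type(int),
				  FILTER_OTHER);
}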
#define __generic_field(type, item, filter_type) \
ret = __trace_define_field(&ftrace_generic_fields, #type, \
#item, 0, 0, is_signed_type(type), \
filter_type); \
if (ret) \
return ret;
#define __common_field(type, item) \
ret = __trace_define_field(&ftrace_common_fields, #type, \
"common_" #item, \
offsetof(typeof(ent), item), \
sizeof(ent.item), \
is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
static int trace_define_generic_fields(void)
{
int ret;
__generic_field(int, CPU, FILTER_CPU);
__generic_field(int, cpu, FILTER_CPU);
__generic_field(char *, COMM, FILTER_COMM);
__generic_field(char *, comm, FILTER_COMM);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Wagner | 31 | 59.62% | 1 | 50.00% |
Steven Rostedt | 21 | 40.38% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
static int trace_define_common_fields(void)
{
int ret;
struct trace_entry ent;
__common_field(unsigned short, type);
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 46 | 100.00% | 3 | 100.00% |
Total | 46 | 100.00% | 3 | 100.00% |
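For reference, each __common_field() invocation above expands to a plain __trace_define_field() call on the ftrace_common_fields list; for example, __common_field(int, pid) expands to:

/* Expansion of __common_field(int, pid), using the local 'ent'. */
ret = __trace_define_field(&ftrace_common_fields, "int",
			   "common_pid",
			   offsetof(typeof(ent), pid),
			   sizeof(ent.pid),
			   is_signed_type(int), FILTER_OTHER);
if (ret)
	return ret;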
static void trace_destroy_fields(struct trace_event_call *call)
{
struct ftrace_event_field *field, *next;
struct list_head *head;
head = trace_get_fields(call);
list_for_each_entry_safe(field, next, head, link) {
list_del(&field->link);
kmem_cache_free(field_cachep, field);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 40 | 68.97% | 1 | 20.00% |
Steven Rostedt | 17 | 29.31% | 3 | 60.00% |
Jovi Zhangwei | 1 | 1.72% | 1 | 20.00% |
Total | 58 | 100.00% | 5 | 100.00% |
/*
* run-time version of trace_event_get_offsets_<call>() that returns the last
* accessible offset of trace fields excluding __dynamic_array bytes
*/
int trace_event_get_offsets(struct trace_event_call *call)
{
struct ftrace_event_field *tail;
struct list_head *head;
head = trace_get_fields(call);
/*
* head->next points to the last field with the largest offset,
* since it was added last by trace_define_field()
*/
tail = list_first_entry(head, struct ftrace_event_field, link);
return tail->offset + tail->size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 49 | 100.00% | 1 | 100.00% |
Total | 49 | 100.00% | 1 | 100.00% |
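Since trace_define_field() adds each new field with list_add(), the head of the list is the field defined last, i.e. the one ending the fixed-layout portion of the record; its offset plus its size is the first byte past the static fields (for a 4-byte field at offset 12, the function returns 16). A hedged sketch of the kind of bounds check this enables; the helper name and policy are illustrative:

/* Illustrative only: check that a buffer covers the fixed-layout fields. */
static bool my_buffer_covers_static_fields(struct trace_event_call *call,
					   size_t size)
{
	return size >= trace_event_get_offsets(call);
}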
int trace_event_raw_init(struct trace_event_call *call)
{
int id;
id = register_trace_event(&call->event);
if (!id)
return -ENODEV;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 32 | 91.43% | 1 | 25.00% |
Steven Rostedt | 3 | 8.57% | 3 | 75.00% |
Total | 35 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(trace_event_raw_init);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
struct trace_array *tr = trace_file->tr;
struct trace_array_cpu *data;
struct trace_pid_list *pid_list;
pid_list = rcu_dereference_sched(tr->filtered_pids);
if (!pid_list)
return false;
data = this_cpu_ptr(tr->trace_buffer.data);
return data->ignore_pid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 62 | 100.00% | 1 | 100.00% |
Total | 62 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
struct trace_event_file *trace_file,
unsigned long len)
{
struct trace_event_call *event_call = trace_file->event_call;
if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
trace_event_ignore_this_pid(trace_file))
return NULL;
local_save_flags(fbuffer->flags);
fbuffer->pc = preempt_count();
/*
* If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
* preemption (adding one to the preempt_count). Since we are
* interested in the preempt_count at the time the tracepoint was
* hit, we need to subtract one to offset the increment.
*/
if (IS_ENABLED(CONFIG_PREEMPT))
fbuffer->pc--;
fbuffer->trace_file = trace_file;
fbuffer->event =
trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
event_call->event.type, len,
fbuffer->flags, fbuffer->pc);
if (!fbuffer->event)
return NULL;
fbuffer->entry = ring_buffer_event_data(fbuffer->event);
return fbuffer->entry;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 136 | 100.00% | 6 | 100.00% |
Total | 136 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
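trace_event_buffer_reserve() is one half of the fast-path pair used by generated tracepoint probes: reserve space (which also applies the PID filter and soft-disable checks), fill in the entry, then commit. A hedged sketch of that pattern, assuming the trace_event_buffer_commit() counterpart provided elsewhere in the tracing core and reusing the illustrative my_entry layout from the earlier sketch:

/* Illustrative probe following the reserve / fill / commit pattern. */
static void my_probe(void *data, unsigned long ip)
{
	struct trace_event_file *trace_file = data;
	struct trace_event_buffer fbuffer;
	struct my_entry *entry;		/* illustrative layout, see above */

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry));
	if (!entry)
		return;

	entry->ip = ip;
	trace_event_buffer_commit(&fbuffer);	/* assumed counterpart helper */
}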
int trace_event_reg(struct trace_event_call *call,
enum trace_reg type, void *data)
{
struct trace_event_file *file = data;
WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
switch (type) {
case TRACE_REG_REGISTER:
return tracepoint_probe_register(call->tp,
call->class->probe,
file);
case TRACE_REG_UNREGISTER:
tracepoint_probe_unregister(call->tp,
call->class->probe,
file);
return 0;
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
return tracepoint_probe_register(call->tp,
call->class->perf_probe,
call);
case TRACE_REG_PERF_UNREGISTER:
tracepoint_probe_unregister(call->tp,
call->class->perf_probe,
call);
return 0;
case TRACE_REG_PERF_OPEN:
case TRACE_REG_PERF_CLOSE:
case TRACE_REG_PERF_ADD:
case TRACE_REG_PERF_DEL:
return 0;
#endif
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 111 | 76.03% | 5 | 62.50% |
Jiri Olsa | 19 | 13.01% | 2 | 25.00% |
Mathieu Desnoyers | 16 | 10.96% | 1 | 12.50% |
Total | 146 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(trace_event_reg);
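For TRACE_EVENT()-generated events this helper serves as the class's .reg callback; the core drives it with TRACE_REG_REGISTER/UNREGISTER from __ftrace_event_enable_disable() further down, and with the PERF variants from the perf glue. A hedged sketch of wiring it into a class; the struct trace_event_class field names are assumed from the tracing headers, and my_probe/my_event_define_fields are the illustrative helpers above:

/*
 * Illustrative only: a class relying on the default registration helper.
 * The corresponding trace_event_call must have TRACE_EVENT_FL_TRACEPOINT
 * set and ->tp pointing at the tracepoint, per the WARN_ON above.
 */
static struct trace_event_class my_event_class = {
	.system		= "my_subsys",
	.probe		= my_probe,
	.reg		= trace_event_reg,
	.define_fields	= my_event_define_fields,
};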
void trace_event_enable_cmd_record(bool enable)
{
struct trace_event_file *file;
struct trace_array *tr;
mutex_lock(&event_mutex);
do_for_each_event_file(tr, file) {
if (!(file->flags & EVENT_FILE_FL_ENABLED))
continue;
if (enable) {
tracing_start_cmdline_record();
set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
} else {
tracing_stop_cmdline_record();
clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
} while_for_each_event_file();
mutex_unlock(&event_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 59 | 67.05% | 1 | 20.00% |
Steven Rostedt | 29 | 32.95% | 4 | 80.00% |
Total | 88 | 100.00% | 5 | 100.00% |
void trace_event_enable_tgid_record(bool enable)
{
struct trace_event_file *file;
struct trace_array *tr;
mutex_lock(&event_mutex);
do_for_each_event_file(tr, file) {
if (!(file->flags & EVENT_FILE_FL_ENABLED))
continue;
if (enable) {
tracing_start_tgid_record();
set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
} else {
tracing_stop_tgid_record();
clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
&file->flags);
}
} while_for_each_event_file();
mutex_unlock(&event_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Joel Fernandes | 88 | 100.00% | 1 | 100.00% |
Total | 88 | 100.00% | 1 | 100.00% |
static int __ftrace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable)
{
struct trace_event_call *call = file->event_call;
struct trace_array *tr = file->tr;
unsigned long file_flags = file->flags;
int ret = 0;
int disable;
switch (enable) {
case 0:
/*
* When soft_disable is set and enable is cleared, the sm_ref
* reference counter is decremented. If it reaches 0, we want
* to clear the SOFT_DISABLED flag but leave the event in the
* state that it was. That is, if the event was enabled and
* SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
* is set we do not want the event to be enabled before we
* clear the bit.
*
* When soft_disable is not set but the SOFT_MODE flag is,
* we do nothing. Do not disable the tracepoint, otherwise
* "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
*/
if (soft_disable) {
if (atomic_dec_return(&file->sm_ref) > 0)
break;
disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
} else
disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
tracing_stop_cmdline_record();
clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
tracing_stop_tgid_record();
clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
}
call->class->reg(call, TRACE_REG_UNREGISTER, file);
}
/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
if (file->flags & EVENT_FILE_FL_SOFT_MODE)
set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
else
clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
break;
case 1:
/*
* When soft_disable is set and enable is set, we want to
* register the tracepoint for the event, but leave the event
* as is. That means, if the event was already enabled, we do
* nothing (but set SOFT_MODE). If the event is disabled, we
* set SOFT_DISABLED before enabling the event tracepoint, so
* it still seems to be disabled.
*/
if (!soft_disable)
clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
else {
if (atomic_inc_return(&file->sm_ref) > 1)
break;
set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
}
if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
bool cmd = false, tgid = false;
/* Keep the event disabled, when going to SOFT_MODE. */
if (soft_disable)
set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
cmd = true;
tracing_start_cmdline_record();
set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
tgid = true;
tracing_start_tgid_record();
set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
}
ret = call->class->reg(call, TRACE_REG_REGISTER, file);
if (ret) {
if (cmd)
tracing_stop_cmdline_record();
if (tgid)
tracing_stop_tgid_record();
pr_info("event trace: Could not enable event "
"%s\n", trace_event_name(call));
break;
}
set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
/* WAS_ENABLED gets set but never cleared. */
set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
}
break;
}
/* Enable or disable use of trace_buffered_event */
if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
(file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
trace_buffered_event_enable();
else
trace_buffered_event_disable();
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 286 | 61.37% | 13 | 56.52% |
Joel Fernandes | 73 | 15.67% | 1 | 4.35% |
Li Zefan | 54 | 11.59% | 2 | 8.70% |
Masami Hiramatsu | 29 | 6.22% | 1 | 4.35% |
Tom Zanussi | 11 | 2.36% | 1 | 4.35% |
Zhao Lei | 6 | 1.29% | 1 | 4.35% |
Jiri Olsa | 2 | 0.43% | 1 | 4.35% |
Jason Baron | 2 | 0.43% | 1 | 4.35% |
Mathieu Desnoyers | 2 | 0.43% | 1 | 4.35% |
Chunyu Hu | 1 | 0.21% | 1 | 4.35% |
Total | 466 | 100.00% | 23 | 100.00% |
int trace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable)
{
return __ftrace_event_enable_disable(file, enable, soft_disable);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 25 | 96.15% | 1 | 50.00% |
Steven Rostedt | 1 | 3.85% | 1 | 50.00% |
Total | 26 | 100.00% | 2 | 100.00% |
static int ftrace_event_enable_disable(struct trace_event_file *file,
int enable)
{
return __ftrace_event_enable_disable(file, enable, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 24 | 100.00% | 2 | 100.00% |
Total | 24 | 100.00% | 2 | 100.00% |
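Both wrappers above funnel into the same state machine: soft_disable == 0 is the hard path used by the per-event enable files, while soft_disable == 1 keeps the tracepoint registered but lets the event stay soft-disabled, reference counted through sm_ref, which is what trigger-style users rely on. A hedged sketch of the two call styles; the helper name and its caller are illustrative:

/* Illustrative only: the two ways the wrappers above get used. */
static int my_enable_event(struct trace_event_file *file, bool soft)
{
	if (!soft)
		/* Hard enable, as the per-event "enable" file does. */
		return ftrace_event_enable_disable(file, 1);

	/*
	 * Soft-mode user (e.g. a trigger): the tracepoint is registered but
	 * the event stays SOFT_DISABLED until soft-enabled; must later be
	 * balanced with trace_event_enable_disable(file, 0, 1) to drop sm_ref.
	 */
	return trace_event_enable_disable(file, 1, 1);
}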
static void ftrace_clear_events(struct trace_array *tr)
{
struct trace_event_file *file;
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
ftrace_event_enable_disable(file, 0);
}
mutex_unlock(&event_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Zhao Lei | 36 | 80.00% | 1 | 33.33% |
Steven Rostedt | 9 | 20.00% | 2 | 66.67% |
Total | 45 | 100.00% | 3 | 100.00% |
static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
struct trace_pid_list *pid_list;
struct trace_array *tr = data;
pid_list = rcu_dereference_sched(tr->filtered_pids);
trace_filter_add_remove_task(pid_list, NULL, task);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 45 | 100.00% | 4 | 100.00% |
Total | 45 | 100.00% | 4 | 100.00% |
static void
event_filter_pid_sched_process_fork(void *data,
struct task_struct *self,
struct task_struct *task)
{
struct trace_pid_list *pid_list;
struct trace_array *tr = data;
pid_list = rcu_dereference_sched(tr->filtered_pids);
trace_filter_add_remove_task(pid_list, self, task);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 50 | 100.00% | 2 | 100.00% |
Total | 50 | 100.00% | 2 | 100.00% |
void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
if (enable) {
register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
tr, INT_MIN);
register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
tr, INT_MAX);
} else {
unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
tr);
unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
tr);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 54 | 100.00% | 2 | 100.00% |
Total | 54 | 100.00% | 2 | 100.00% |
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, prev) &&
trace_ignore_this_task(pid_list, next));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 66 | 95.65% | 4 | 80.00% |
Linus Torvalds | 3 | 4.35% | 1 | 20.00% |
Total | 69 | 100.00% | 5 | 100.00% |
static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, next));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 59 | 95.16% | 3 | 75.00% |
Linus Torvalds | 3 | 4.84% | 1 | 25.00% |
Total | 62 | 100.00% | 4 | 100.00% |
static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
/* Nothing to do if we are already tracing */
if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, task));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 64 | 91.43% | 4 | 80.00% |
Zhao Lei | 6 | 8.57% | 1 | 20.00% |
Total | 70 | 100.00% | 5 | 100.00% |
static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
/* Nothing to do if we are not tracing */
if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
/* Set tracing if current is enabled */
this_cpu_write(tr->trace_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, current));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 70 | 100.00% | 4 | 100.00% |
Total | 70 | 100.00% | 4 | 100.00% |
static void __ftrace_clear_event_pids(struct trace_array *tr)
{
struct trace_pid_list *pid_list;
struct trace_event_file *file;
int cpu;
pid_list = rcu_dereference_protected(tr->filtered_pids,
lockdep_is_held(&event_mutex));
if (!pid_list)
return;
unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
list_for_each_entry(file, &tr->events, list) {
clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
}
for_each_possible_cpu(cpu)
per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
rcu_assign_pointer(tr->filtered_pids, NULL);
/* Wait till all users are no longer using pid filtering */
synchronize_sched();
trace_free_pid_list(pid_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 133 | 84.18% | 4 | 80.00% |
Oleg Nesterov | 25 | 15.82% | 1 | 20.00% |
Total | 158 | 100.00% | 5 | 100.00% |
static void ftrace_clear_event_pids(struct trace_array *tr)
{
mutex_lock(&event_mutex);
__ftrace_clear_event_pids(tr);
mutex_unlock(&event_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 27 | 96.43% | 1 | 50.00% |
Oleg Nesterov | 1 | 3.57% | 1 | 50.00% |
Total | 28 | 100.00% | 2 | 100.00% |
static void __put_system(struct event_subsystem *system)
{
struct event_filter *filter = system->filter;
WARN_ON_ONCE(system_refcount(system) == 0);
if (system_refcount_dec(system))
return;
list_del(&system->list);
if (filter) {
kfree(filter->filter_string);
kfree(filter);
}
kfree_const(system->name);
kfree(system);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 52 | 68.42% | 4 | 50.00% |
Oleg Nesterov | 20 | 26.32% | 2 | 25.00% |
David Howells | 3 | 3.95% | 1 | 12.50% |
Rasmus Villemoes | 1 | 1.32% | 1 | 12.50% |
Total | 76 | 100.00% | 8 | 100.00% |
static void __get_system(struct event_subsystem *system)
{
WARN_ON_ONCE(system_refcount(system) == 0);
system_refcount_inc(system);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 23 | 88.46% | 3 | 75.00% |
Oleg Nesterov | 3 | 11.54% | 1 | 25.00% |
Total | 26 | 100.00% | 4 | 100.00% |
static void __get_system_dir(struct trace_subsystem_dir *dir)
{
WARN_ON_ONCE(dir->ref_count == 0);
dir->ref_count++;
__get_system(dir->subsystem);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 28 | 87.50% | 4 | 66.67% |
Mathieu Desnoyers | 3 | 9.38% | 1 | 16.67% |
Li Zefan | 1 | 3.12% | 1 | 16.67% |
Total | 32 | 100.00% | 6 | 100.00% |
static void __put_system_dir(struct trace_subsystem_dir *dir)
{
WARN_ON_ONCE(dir->ref_count == 0);
/* If the subsystem is about to be freed, the dir must be too */
WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
__put_system(dir->subsystem);
if (!--dir->ref_count)
kfree(dir);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 59 | 100.00% | 8 | 100.00% |
Total | 59 | 100.00% | 8 | 100.00% |
static void put_system(struct