cregit-Linux how code gets into the kernel

Release 4.15 kernel/trace/trace_events.c

Directory: kernel/trace
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */


#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"


#undef TRACE_SYSTEM

#define TRACE_SYSTEM "TRACE_SYSTEM"


DEFINE_MUTEX(event_mutex);


LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);


#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)


static struct kmem_cache *field_cachep;

static struct kmem_cache *file_cachep;


/* Return the current reference count held on an event subsystem. */
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt17100.00%1100.00%
Total17100.00%1100.00%


/* Bump the subsystem refcount; returns the count prior to the increment. */
static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt17100.00%1100.00%
Total17100.00%1100.00%


/* Drop one subsystem reference; returns the count remaining afterwards. */
static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt17100.00%1100.00%
Total17100.00%1100.00%

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
/*
 * Return the field list of an event: use the class get_fields() callback
 * when one is provided, otherwise the class's static fields list.
 */
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (event_call->class->get_fields)
		return event_call->class->get_fields(event_call);
	return &event_call->class->fields;
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt3997.50%266.67%
Jovi Zhangwei12.50%133.33%
Total40100.00%3100.00%


/* Look up a field by name on one list; NULL when not present. */
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *f;

	/* Field lists are short, so a linear scan is fine here. */
	list_for_each_entry(f, head, link) {
		if (strcmp(f->name, name) == 0)
			return f;
	}

	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Jovi Zhangwei50100.00%1100.00%
Total50100.00%1100.00%


/*
 * Find a field of an event by name, searching the event's own fields
 * first, then the generic fields, then the common fields.
 */
struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;

	field = __find_event_field(trace_get_fields(call), name);
	if (!field)
		field = __find_event_field(&ftrace_generic_fields, name);
	if (!field)
		field = __find_event_field(&ftrace_common_fields, name);

	return field;
}

Contributors

PersonTokensPropCommitsCommitProp
Jovi Zhangwei4762.67%125.00%
Daniel Wagner1621.33%125.00%
Steven Rostedt1216.00%250.00%
Total75100.00%4100.00%


/*
 * Allocate an ftrace_event_field describing one record member and add it
 * to @head.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	/* FILTER_OTHER means "derive the filter type from the C type string". */
	field->filter_type = (filter_type == FILTER_OTHER) ?
		filter_assign_type(type) : filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Tom Zanussi8469.42%222.22%
Li Zefan2722.31%333.33%
Namhyung Kim43.31%111.11%
Steven Rostedt43.31%222.22%
Frédéric Weisbecker21.65%111.11%
Total121100.00%9100.00%


/*
 * Public entry point for defining a field on an event.  Warns and
 * returns 0 (no-op) when the event has no class.
 */
int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}

Contributors

PersonTokensPropCommitsCommitProp
Li Zefan7498.67%150.00%
Steven Rostedt11.33%150.00%
Total75100.00%2100.00%

EXPORT_SYMBOL_GPL(trace_define_field);

/* Helpers for the define_*_fields() functions below; both require a local
 * "ret" and return from the enclosing function on error. */
#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0,				\
				   is_signed_type(type),		\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
/* Register the generic fields usable by any event's filter expressions. */
static int trace_define_generic_fields(void)
{
	int ret;

	/* Both spellings are accepted: "CPU"/"cpu" and "COMM"/"comm". */
	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Wagner3159.62%150.00%
Steven Rostedt2140.38%150.00%
Total52100.00%2100.00%


/* Register the common_* fields that mirror struct trace_entry's header. */
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Li Zefan46100.00%3100.00%
Total46100.00%3100.00%


/* Unlink and free every field descriptor attached to an event. */
static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *tmp;
	struct list_head *head;

	head = trace_get_fields(call);
	/* _safe iteration: each entry is unlinked and freed as we walk. */
	list_for_each_entry_safe(field, tmp, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Li Zefan4068.97%120.00%
Steven Rostedt1729.31%360.00%
Jovi Zhangwei11.72%120.00%
Total58100.00%5100.00%

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

Contributors

PersonTokensPropCommitsCommitProp
Alexei Starovoitov49100.00%1100.00%
Total49100.00%1100.00%


/*
 * Register the event with the output-formatting layer.  A zero id from
 * register_trace_event() means registration failed.
 */
int trace_event_raw_init(struct trace_event_call *call)
{
	int id = register_trace_event(&call->event);

	return id ? 0 : -ENODEV;
}

Contributors

PersonTokensPropCommitsCommitProp
Li Zefan3291.43%125.00%
Steven Rostedt38.57%375.00%
Total35100.00%4100.00%

EXPORT_SYMBOL_GPL(trace_event_raw_init);
/*
 * Return true when the current CPU's context is filtered out by this
 * instance's pid filter.  With no filter installed, nothing is ignored.
 */
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	if (!pid_list)
		return false;

	return this_cpu_ptr(tr->trace_buffer.data)->ignore_pid;
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt62100.00%1100.00%
Total62100.00%1100.00%

EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, struct trace_event_file *trace_file, unsigned long len) { struct trace_event_call *event_call = trace_file->event_call; if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) && trace_event_ignore_this_pid(trace_file)) return NULL; local_save_flags(fbuffer->flags); fbuffer->pc = preempt_count(); /* * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables * preemption (adding one to the preempt_count). Since we are * interested in the preempt_count at the time the tracepoint was * hit, we need to subtract one to offset the increment. */ if (IS_ENABLED(CONFIG_PREEMPT)) fbuffer->pc--; fbuffer->trace_file = trace_file; fbuffer->event = trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file, event_call->event.type, len, fbuffer->flags, fbuffer->pc); if (!fbuffer->event) return NULL; fbuffer->entry = ring_buffer_event_data(fbuffer->event); return fbuffer->entry; }

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt136100.00%6100.00%
Total136100.00%6100.00%

EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
int trace_event_reg(struct trace_event_call *call, enum trace_reg type, void *data) { struct trace_event_file *file = data; WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); switch (type) { case TRACE_REG_REGISTER: return tracepoint_probe_register(call->tp, call->class->probe, file); case TRACE_REG_UNREGISTER: tracepoint_probe_unregister(call->tp, call->class->probe, file); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: return tracepoint_probe_register(call->tp, call->class->perf_probe, call); case TRACE_REG_PERF_UNREGISTER: tracepoint_probe_unregister(call->tp, call->class->perf_probe, call); return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: case TRACE_REG_PERF_DEL: return 0; #endif } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt11176.03%562.50%
Jiri Olsa1913.01%225.00%
Mathieu Desnoyers1610.96%112.50%
Total146100.00%8100.00%

EXPORT_SYMBOL_GPL(trace_event_reg);
/* Toggle comm (cmdline) recording for every currently-enabled event file. */
void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {
		/* Only events that are currently enabled are affected. */
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

Contributors

PersonTokensPropCommitsCommitProp
Li Zefan5967.05%120.00%
Steven Rostedt2932.95%480.00%
Total88100.00%5100.00%


/* Toggle tgid recording for every currently-enabled event file. */
void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {
		/* Only events that are currently enabled are affected. */
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

Contributors

PersonTokensPropCommitsCommitProp
Joel Fernandes88100.00%1100.00%
Total88100.00%1100.00%


static int __ftrace_event_enable_disable(struct trace_event_file *file, int enable, int soft_disable) { struct trace_event_call *call = file->event_call; struct trace_array *tr = file->tr; unsigned long file_flags = file->flags; int ret = 0; int disable; switch (enable) { case 0: /* * When soft_disable is set and enable is cleared, the sm_ref * reference counter is decremented. If it reaches 0, we want * to clear the SOFT_DISABLED flag but leave the event in the * state that it was. That is, if the event was enabled and * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED * is set we do not want the event to be enabled before we * clear the bit. * * When soft_disable is not set but the SOFT_MODE flag is, * we do nothing. Do not disable the tracepoint, otherwise * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. */ if (soft_disable) { if (atomic_dec_return(&file->sm_ref) > 0) break; disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED; clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); } else disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE); if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) { clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); if (file->flags & EVENT_FILE_FL_RECORDED_CMD) { tracing_stop_cmdline_record(); clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); } if (file->flags & EVENT_FILE_FL_RECORDED_TGID) { tracing_stop_tgid_record(); clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); } call->class->reg(call, TRACE_REG_UNREGISTER, file); } /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ if (file->flags & EVENT_FILE_FL_SOFT_MODE) set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); else clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); break; case 1: /* * When soft_disable is set and enable is set, we want to * register the tracepoint for the event, but leave the event * as is. That means, if the event was already enabled, we do * nothing (but set SOFT_MODE). 
If the event is disabled, we * set SOFT_DISABLED before enabling the event tracepoint, so * it still seems to be disabled. */ if (!soft_disable) clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); else { if (atomic_inc_return(&file->sm_ref) > 1) break; set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); } if (!(file->flags & EVENT_FILE_FL_ENABLED)) { bool cmd = false, tgid = false; /* Keep the event disabled, when going to SOFT_MODE. */ if (soft_disable) set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); if (tr->trace_flags & TRACE_ITER_RECORD_CMD) { cmd = true; tracing_start_cmdline_record(); set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); } if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { tgid = true; tracing_start_tgid_record(); set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); } ret = call->class->reg(call, TRACE_REG_REGISTER, file); if (ret) { if (cmd) tracing_stop_cmdline_record(); if (tgid) tracing_stop_tgid_record(); pr_info("event trace: Could not enable event " "%s\n", trace_event_name(call)); break; } set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); /* WAS_ENABLED gets set but never cleared. */ set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags); } break; } /* Enable or disable use of trace_buffered_event */ if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) != (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) { if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) trace_buffered_event_enable(); else trace_buffered_event_disable(); } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt28661.37%1356.52%
Joel Fernandes7315.67%14.35%
Li Zefan5411.59%28.70%
Masami Hiramatsu296.22%14.35%
Tom Zanussi112.36%14.35%
Zhao Lei61.29%14.35%
Jiri Olsa20.43%14.35%
Jason Baron20.43%14.35%
Mathieu Desnoyers20.43%14.35%
Chunyu Hu10.21%14.35%
Total466100.00%23100.00%


/* Public wrapper around the soft-disable aware enable/disable core. */
int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

Contributors

PersonTokensPropCommitsCommitProp
Tom Zanussi2596.15%150.00%
Steven Rostedt13.85%150.00%
Total26100.00%2100.00%


/* Hard enable/disable: no soft-disable bookkeeping involved. */
static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt24100.00%2100.00%
Total24100.00%2100.00%


static void ftrace_clear_events(struct trace_array *tr) { struct trace_event_file *file; mutex_lock(&event_mutex); list_for_each_entry(file, &tr->events, list) { ftrace_event_enable_disable(file, 0); } mutex_unlock(&event_mutex); }

Contributors

PersonTokensPropCommitsCommitProp
Zhao Lei3680.00%133.33%
Steven Rostedt920.00%266.67%
Total45100.00%3100.00%


/* sched_process_exit probe: drop the exiting task from the pid filter. */
static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt45100.00%4100.00%
Total45100.00%4100.00%


/* sched_process_fork probe: propagate the parent's filter state to @task. */
static void
event_filter_pid_sched_process_fork(void *data, struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt50100.00%2100.00%
Total50100.00%2100.00%


/*
 * Attach or detach the fork/exit probes that keep the instance's pid
 * filter in sync with process creation and exit.  The probes are
 * registered with extreme priorities (INT_MIN / INT_MAX).
 */
void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (!enable) {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
						    tr);
		return;
	}

	register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
					       tr, INT_MIN);
	register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
					       tr, INT_MAX);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt54100.00%2100.00%
Total54100.00%2100.00%


/*
 * Pre-switch probe: ignore this CPU only when both the outgoing and the
 * incoming task are filtered out.
 */
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, prev) &&
		       trace_ignore_this_task(pid_list, next));
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt6695.65%480.00%
Linus Torvalds34.35%120.00%
Total69100.00%5100.00%


/* Post-switch probe: after the switch only the incoming task matters. */
static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt5995.16%375.00%
Linus Torvalds34.84%125.00%
Total62100.00%4100.00%


/* Wakeup probe (pre): re-evaluate the filter for the woken task. */
static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, task));
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt6491.43%480.00%
Zhao Lei68.57%120.00%
Total70100.00%5100.00%


/* Wakeup probe (post): re-check the filter against the current task. */
static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt70100.00%4100.00%
Total70100.00%4100.00%


static void __ftrace_clear_event_pids(struct trace_array *tr) { struct trace_pid_list *pid_list; struct trace_event_file *file; int cpu; pid_list = rcu_dereference_protected(tr->filtered_pids, lockdep_is_held(&event_mutex)); if (!pid_list) return; unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr); unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr); unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr); unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr); unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr); unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr); list_for_each_entry(file, &tr->events, list) { clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); } for_each_possible_cpu(cpu) per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false; rcu_assign_pointer(tr->filtered_pids, NULL); /* Wait till all users are no longer using pid filtering */ synchronize_sched(); trace_free_pid_list(pid_list); }

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt13384.18%480.00%
Oleg Nesterov2515.82%120.00%
Total158100.00%5100.00%


/* Locked wrapper: serialize pid-filter teardown under event_mutex. */
static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt2796.43%150.00%
Oleg Nesterov13.57%150.00%
Total28100.00%2100.00%


/*
 * Drop one reference on a subsystem; on the last reference, unlink it
 * and free the subsystem along with its filter.
 */
static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt5268.42%450.00%
Oleg Nesterov2026.32%225.00%
David Howells33.95%112.50%
Rasmus Villemoes11.32%112.50%
Total76100.00%8100.00%


/* Take a reference on a subsystem; taking one on a dead subsystem is a bug. */
static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt2388.46%375.00%
Oleg Nesterov311.54%125.00%
Total26100.00%4100.00%


/* Take a reference on a subsystem dir; this also pins the subsystem itself. */
static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt2887.50%466.67%
Mathieu Desnoyers39.38%116.67%
Li Zefan13.12%116.67%
Total32100.00%6100.00%


/* Drop a dir reference and its subsystem reference; free the dir at zero. */
static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 &&
		     dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt59100.00%8100.00%
Total59100.00%8100.00%


static void put_system(struct