Release 4.15 kernel/trace/trace_events_filter.c
/*
* trace_events_filter - generic event filtering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "trace.h"
#include "trace_output.h"
#define DEFAULT_SYS_FILTER_MESSAGE \
"### global filter ###\n" \
"# Use this to set filters for multiple events.\n" \
"# Only events with the given fields will be affected.\n" \
"# If no events are modified, an error message will be displayed here"
enum filter_op_ids
{
OP_OR,
OP_AND,
OP_GLOB,
OP_NE,
OP_EQ,
OP_LT,
OP_LE,
OP_GT,
OP_GE,
OP_BAND,
OP_NOT,
OP_NONE,
OP_OPEN_PAREN,
};
struct filter_op {
int id;
char *string;
int precedence;
};
/* Order must be the same as enum filter_op_ids above */
static struct filter_op filter_ops[] = {
{ OP_OR, "||", 1 },
{ OP_AND, "&&", 2 },
{ OP_GLOB, "~", 4 },
{ OP_NE, "!=", 4 },
{ OP_EQ, "==", 4 },
{ OP_LT, "<", 5 },
{ OP_LE, "<=", 5 },
{ OP_GT, ">", 5 },
{ OP_GE, ">=", 5 },
{ OP_BAND, "&", 6 },
{ OP_NOT, "!", 6 },
{ OP_NONE, "OP_NONE", 0 },
{ OP_OPEN_PAREN, "(", 0 },
};
enum {
FILT_ERR_NONE,
FILT_ERR_INVALID_OP,
FILT_ERR_UNBALANCED_PAREN,
FILT_ERR_TOO_MANY_OPERANDS,
FILT_ERR_OPERAND_TOO_LONG,
FILT_ERR_FIELD_NOT_FOUND,
FILT_ERR_ILLEGAL_FIELD_OP,
FILT_ERR_ILLEGAL_INTVAL,
FILT_ERR_BAD_SUBSYS_FILTER,
FILT_ERR_TOO_MANY_PREDS,
FILT_ERR_MISSING_FIELD,
FILT_ERR_INVALID_FILTER,
FILT_ERR_IP_FIELD_ONLY,
FILT_ERR_ILLEGAL_NOT_OP,
};
static char *err_text[] = {
"No error",
"Invalid operator",
"Unbalanced parens",
"Too many operands",
"Operand too long",
"Field not found",
"Illegal operation for field type",
"Illegal integer value",
"Couldn't find or set field in one of a subsystem's events",
"Too many terms in predicate expression",
"Missing field name and/or value",
"Meaningless filter expression",
"Only 'ip' field is supported for function trace",
"Illegal use of '!'",
};
struct opstack_op {
enum filter_op_ids op;
struct list_head list;
};
struct postfix_elt {
enum filter_op_ids op;
char *operand;
struct list_head list;
};
struct filter_parse_state {
struct filter_op *ops;
struct list_head opstack;
struct list_head postfix;
int lasterr;
int lasterr_pos;
struct {
char *string;
unsigned int cnt;
unsigned int tail;
} infix;
struct {
char string[MAX_FILTER_STR_VAL];
int pos;
unsigned int tail;
} operand;
};
struct pred_stack {
struct filter_pred **preds;
int index;
};
/* If not of not match is equal to not of not, then it is a match */
#define DEFINE_COMPARISON_PRED(type) \
static int filter_pred_LT_##type(struct filter_pred *pred, void *event) \
{ \
type *addr = (type *)(event + pred->offset); \
type val = (type)pred->val; \
int match = (*addr < val); \
return !!match == !pred->not; \
} \
static int filter_pred_LE_##type(struct filter_pred *pred, void *event) \
{ \
type *addr = (type *)(event + pred->offset); \
type val = (type)pred->val; \
int match = (*addr <= val); \
return !!match == !pred->not; \
} \
static int filter_pred_GT_##type(struct filter_pred *pred, void *event) \
{ \
type *addr = (type *)(event + pred->offset); \
type val = (type)pred->val; \
int match = (*addr > val); \
return !!match == !pred->not; \
} \
static int filter_pred_GE_##type(struct filter_pred *pred, void *event) \
{ \
type *addr = (type *)(event + pred->offset); \
type val = (type)pred->val; \
int match = (*addr >= val); \
return !!match == !pred->not; \
} \
static int filter_pred_BAND_##type(struct filter_pred *pred, void *event) \
{ \
type *addr = (type *)(event + pred->offset); \
type val = (type)pred->val; \
int match = !!(*addr & val); \
return match == !pred->not; \
} \
static const filter_pred_fn_t pred_funcs_##type[] = { \
filter_pred_LT_##type, \
filter_pred_LE_##type, \
filter_pred_GT_##type, \
filter_pred_GE_##type, \
filter_pred_BAND_##type, \
};
#define PRED_FUNC_START OP_LT
#define DEFINE_EQUALITY_PRED(size) \
static int filter_pred_##size(struct filter_pred *pred, void *event) \
{ \
u##size *addr = (u##size *)(event + pred->offset); \
u##size val = (u##size)pred->val; \
int match; \
\
match = (val == *addr) ^ pred->not; \
\
return match; \
}
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
/* Filter predicate for fixed sized arrays of characters */
static int filter_pred_string(struct filter_pred *pred, void *event)
{
char *addr = (char *)(event + pred->offset);
int cmp, match;
cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
match = cmp ^ pred->not;
return match;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 59 | 85.51% | 1 | 50.00% |
Frédéric Weisbecker | 10 | 14.49% | 1 | 50.00% |
Total | 69 | 100.00% | 2 | 100.00% |
/* Filter predicate for char * pointers */
static int filter_pred_pchar(struct filter_pred *pred, void *event)
{
char **addr = (char **)(event + pred->offset);
int cmp, match;
int len = strlen(*addr) + 1; /* including trailing '\0' */
cmp = pred->regex.match(*addr, &pred->regex, len);
match = cmp ^ pred->not;
return match;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 73 | 91.25% | 2 | 66.67% |
Frédéric Weisbecker | 7 | 8.75% | 1 | 33.33% |
Total | 80 | 100.00% | 3 | 100.00% |
/*
* Filter predicate for dynamic sized arrays of characters.
* These are implemented through a list of strings at the end
* of the entry.
* Each of these strings also has a field in the entry which
* contains its offset from the beginning of the entry.
* To get the address of the string, we first read this offset
* field and add its value to the address of the entry.
*/
static int filter_pred_strloc(struct filter_pred *pred, void *event)
{
u32 str_item = *(u32 *)(event + pred->offset);
int str_loc = str_item & 0xffff;
int str_len = str_item >> 16;
char *addr = (char *)(event + str_loc);
int cmp, match;
cmp = pred->regex.match(addr, &pred->regex, str_len);
match = cmp ^ pred->not;
return match;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 76 | 81.72% | 2 | 66.67% |
Li Zefan | 17 | 18.28% | 1 | 33.33% |
Total | 93 | 100.00% | 3 | 100.00% |
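The packed offset/length word read by filter_pred_strloc() is the __data_loc encoding used for dynamic arrays in trace events: the low 16 bits hold the string's offset from the start of the record, the high 16 bits hold its length. A minimal userspace sketch of the packing and unpacking (the record layout and field names are illustrative, not taken from a real event):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative record layout: a __data_loc word followed by the string data. */
struct sample_record {
	uint32_t msg_loc;     /* (len << 16) | offset, as read by filter_pred_strloc() */
	char     payload[32]; /* string data stored after the fixed fields */
};

int main(void)
{
	struct sample_record rec;
	const char *msg = "hello";

	/* Place the string after the fixed part and pack its location. */
	memcpy(rec.payload, msg, strlen(msg) + 1);
	rec.msg_loc = ((uint32_t)strlen(msg) << 16) |
		      (uint32_t)offsetof(struct sample_record, payload);

	/* Unpack exactly as filter_pred_strloc() does. */
	uint32_t str_item = rec.msg_loc;
	int str_loc = str_item & 0xffff;
	int str_len = str_item >> 16;
	char *addr = (char *)&rec + str_loc;

	printf("offset=%d len=%d str=%.*s\n", str_loc, str_len, str_len, addr);
	return 0;
}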
/* Filter predicate for CPUs. */
static int filter_pred_cpu(struct filter_pred *pred, void *event)
{
int cpu, cmp;
int match = 0;
cpu = raw_smp_processor_id();
cmp = pred->val;
switch (pred->op) {
case OP_EQ:
match = cpu == cmp;
break;
case OP_LT:
match = cpu < cmp;
break;
case OP_LE:
match = cpu <= cmp;
break;
case OP_GT:
match = cpu > cmp;
break;
case OP_GE:
match = cpu >= cmp;
break;
default:
break;
}
return !!match == !pred->not;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Wagner | 106 | 100.00% | 1 | 100.00% |
Total | 106 | 100.00% | 1 | 100.00% |
/* Filter predicate for COMM. */
static int filter_pred_comm(struct filter_pred *pred, void *event)
{
int cmp, match;
cmp = pred->regex.match(current->comm, &pred->regex,
pred->regex.field_len);
match = cmp ^ pred->not;
return match;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Wagner | 55 | 100.00% | 1 | 100.00% |
Total | 55 | 100.00% | 1 | 100.00% |
static int filter_pred_none(struct filter_pred *pred, void *event)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
/*
* regex_match_foo - Basic regex callbacks
*
* @str: the string to be searched
* @r: the regex structure containing the pattern string
* @len: the length of the string to be searched (including '\0')
*
* Note:
* - @str might not be NULL-terminated if it's of type DYN_STRING
* or STATIC_STRING
*/
static int regex_match_full(char *str, struct regex *r, int len)
{
if (strncmp(str, r->pattern, len) == 0)
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
static int regex_match_front(char *str, struct regex *r, int len)
{
if (strncmp(str, r->pattern, r->len) == 0)
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 39 | 95.12% | 1 | 50.00% |
Li Zefan | 2 | 4.88% | 1 | 50.00% |
Total | 41 | 100.00% | 2 | 100.00% |
static int regex_match_middle(char *str, struct regex *r, int len)
{
if (strnstr(str, r->pattern, len))
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 34 | 91.89% | 1 | 50.00% |
Li Zefan | 3 | 8.11% | 1 | 50.00% |
Total | 37 | 100.00% | 2 | 100.00% |
static int regex_match_end(char *str, struct regex *r, int len)
{
int strlen = len - 1;
if (strlen >= r->len &&
memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 37 | 61.67% | 1 | 50.00% |
Li Zefan | 23 | 38.33% | 1 | 50.00% |
Total | 60 | 100.00% | 2 | 100.00% |
static int regex_match_glob(char *str, struct regex *r, int len __maybe_unused)
{
if (glob_match(r->pattern, str))
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
/**
* filter_parse_regex - parse a basic regex
* @buff: the raw regex
* @len: length of the regex
* @search: will point to the beginning of the string to compare
* @not: tell whether the match will have to be inverted
*
* This passes in a buffer containing a regex and this function will
* set search to point to the search part of the buffer and
* return the type of search it is (see enum above).
* This does modify buff.
*
* Returns enum type.
* search returns the pointer to use for comparison.
* not returns 1 if buff started with a '!'
* 0 otherwise.
*/
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
int type = MATCH_FULL;
int i;
if (buff[0] == '!') {
*not = 1;
buff++;
len--;
} else
*not = 0;
*search = buff;
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
*search = buff + 1;
type = MATCH_END_ONLY;
} else if (i == len - 1) {
if (type == MATCH_END_ONLY)
type = MATCH_MIDDLE_ONLY;
else
type = MATCH_FRONT_ONLY;
buff[i] = 0;
break;
} else { /* pattern continues, use full glob */
type = MATCH_GLOB;
break;
}
} else if (strchr("[?\\", buff[i])) {
type = MATCH_GLOB;
break;
}
}
return type;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 135 | 78.03% | 1 | 50.00% |
Masami Hiramatsu | 38 | 21.97% | 1 | 50.00% |
Total | 173 | 100.00% | 2 | 100.00% |
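As a quick illustration of the classification done by filter_parse_regex(): a pattern with no '*' stays MATCH_FULL, a trailing '*' gives MATCH_FRONT_ONLY, a leading '*' gives MATCH_END_ONLY, both a leading and a trailing '*' give MATCH_MIDDLE_ONLY, and '[', '?', '\' or an interior '*' fall back to MATCH_GLOB. The sketch below mirrors the loop above; the enum is a local stand-in so it builds outside the kernel, and the pattern strings are made up:

#include <stdio.h>
#include <string.h>

/* Local stand-ins for the kernel's enum regex_type, for illustration only. */
enum { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY, MATCH_GLOB };

static int classify(const char *buff)
{
	int type = MATCH_FULL;
	int len = strlen(buff);

	for (int i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i)
				type = MATCH_END_ONLY;
			else if (i == len - 1)
				type = (type == MATCH_END_ONLY) ?
					MATCH_MIDDLE_ONLY : MATCH_FRONT_ONLY;
			else
				return MATCH_GLOB;      /* '*' in the middle of the pattern */
		} else if (strchr("[?\\", buff[i])) {
			return MATCH_GLOB;
		}
	}
	return type;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       classify("sched*"),       /* 1: MATCH_FRONT_ONLY  */
	       classify("*wakeup"),      /* 3: MATCH_END_ONLY    */
	       classify("*switch*"),     /* 2: MATCH_MIDDLE_ONLY */
	       classify("irq_?andler")); /* 4: MATCH_GLOB        */
	return 0;
}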
static void filter_build_regex(struct filter_pred *pred)
{
struct regex *r = &pred->regex;
char *search;
enum regex_type type = MATCH_FULL;
int not = 0;
if (pred->op == OP_GLOB) {
type = filter_parse_regex(r->pattern, r->len, &search, &not);
r->len = strlen(search);
memmove(r->pattern, search, r->len+1);
}
switch (type) {
case MATCH_FULL:
r->match = regex_match_full;
break;
case MATCH_FRONT_ONLY:
r->match = regex_match_front;
break;
case MATCH_MIDDLE_ONLY:
r->match = regex_match_middle;
break;
case MATCH_END_ONLY:
r->match = regex_match_end;
break;
case MATCH_GLOB:
r->match = regex_match_glob;
break;
}
pred->not ^= not;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 117 | 77.48% | 1 | 33.33% |
Li Zefan | 24 | 15.89% | 1 | 33.33% |
Masami Hiramatsu | 10 | 6.62% | 1 | 33.33% |
Total | 151 | 100.00% | 3 | 100.00% |
enum move_type {
MOVE_DOWN,
MOVE_UP_FROM_LEFT,
MOVE_UP_FROM_RIGHT
};
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
int index, enum move_type *move)
{
if (pred->parent & FILTER_PRED_IS_RIGHT)
*move = MOVE_UP_FROM_RIGHT;
else
*move = MOVE_UP_FROM_LEFT;
pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
return pred;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 61 | 100.00% | 1 | 100.00% |
Total | 61 | 100.00% | 1 | 100.00% |
enum walk_return {
WALK_PRED_ABORT,
WALK_PRED_PARENT,
WALK_PRED_DEFAULT,
};
typedef int (*filter_pred_walkcb_t) (enum move_type move,
struct filter_pred *pred,
int *err, void *data);
static int walk_pred_tree(struct filter_pred *preds,
struct filter_pred *root,
filter_pred_walkcb_t cb, void *data)
{
struct filter_pred *pred = root;
enum move_type move = MOVE_DOWN;
int done = 0;
if (!preds)
return -EINVAL;
do {
int err = 0, ret;
ret = cb(move, pred, &err, data);
if (ret == WALK_PRED_ABORT)
return err;
if (ret == WALK_PRED_PARENT)
goto get_parent;
switch (move) {
case MOVE_DOWN:
if (pred->left != FILTER_PRED_INVALID) {
pred = &preds[pred->left];
continue;
}
goto get_parent;
case MOVE_UP_FROM_LEFT:
pred = &preds[pred->right];
move = MOVE_DOWN;
continue;
case MOVE_UP_FROM_RIGHT:
get_parent:
if (pred == root)
break;
pred = get_pred_parent(pred, preds,
pred->parent,
&move);
continue;
}
done = 1;
} while (!done);
/* We are fine. */
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 186 | 100.00% | 1 | 100.00% |
Total | 186 | 100.00% | 1 | 100.00% |
/*
* A series of ANDs or ORs were found together. Instead of
* climbing up and down the tree branches, an array of the
* ops were made in order of checks. We can just move across
* the array and short circuit if needed.
*/
static int process_ops(struct filter_pred *preds,
struct filter_pred *op, void *rec)
{
struct filter_pred *pred;
int match = 0;
int type;
int i;
/*
* Micro-optimization: We set type to true if op
* is an OR and false otherwise (AND). Then we
* just need to test if the match is equal to
* the type, and if it is, we can short circuit the
* rest of the checks:
*
* if ((match && op->op == OP_OR) ||
* (!match && op->op == OP_AND))
* return match;
*/
type = op->op == OP_OR;
for (i = 0; i < op->val; i++) {
pred = &preds[op->ops[i]];
if (!WARN_ON_ONCE(!pred->fn))
match = pred->fn(pred, rec);
if (!!match == type)
break;
}
/* If not of not match is equal to not of not, then it is a match */
return !!match == !op->not;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 102 | 87.18% | 2 | 50.00% |
Jiri Olsa | 11 | 9.40% | 1 | 25.00% |
Ingo Molnar | 4 | 3.42% | 1 | 25.00% |
Total | 117 | 100.00% | 4 | 100.00% |
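The micro-optimization in the comment inside process_ops() is the usual short-circuit rule written as a single comparison: for OR a true child decides the result, for AND a false one does. A minimal standalone sketch of the same identity over a plain array of child results (the helper name is illustrative):

#include <stdio.h>

enum { OP_AND, OP_OR };

/* Mirror of the loop in process_ops(): stop as soon as the result is decided. */
static int eval_children(int op, const int *vals, int n, int negate)
{
	int type = (op == OP_OR);  /* 1 for OR, 0 for AND */
	int match = 0;

	for (int i = 0; i < n; i++) {
		match = vals[i];
		if (!!match == type)   /* a true child decides OR, a false child decides AND */
			break;
	}
	return !!match == !negate;     /* apply the predicate's 'not' flag */
}

int main(void)
{
	int vals[] = { 0, 1, 0 };

	printf("OR  = %d\n", eval_children(OP_OR, vals, 3, 0));  /* 1: stops at the second child */
	printf("AND = %d\n", eval_children(OP_AND, vals, 3, 0)); /* 0: stops at the first child  */
	return 0;
}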
struct filter_match_preds_data {
struct filter_pred *preds;
int match;
void *rec;
};
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
int *err, void *data)
{
struct filter_match_preds_data *d = data;
*err = 0;
switch (move) {
case MOVE_DOWN:
/* only AND and OR have children */
if (pred->left != FILTER_PRED_INVALID) {
/* If ops is set, then it was folded. */
if (!pred->ops)
return WALK_PRED_DEFAULT;
/* We can treat folded ops as a leaf node */
d->match = process_ops(d->preds, pred, d->rec);
} else {
if (!WARN_ON_ONCE(!pred->fn))
d->match = pred->fn(pred, d->rec);
}
return WALK_PRED_PARENT;
case MOVE_UP_FROM_LEFT:
/*
* Check for short circuits.
*
* Optimization: !!match == (pred->op == OP_OR)
* is the same as:
* if ((match && pred->op == OP_OR) ||
* (!match && pred->op == OP_AND))
*/
if (!!d->match == (pred->op == OP_OR))
return WALK_PRED_PARENT;
break;
case MOVE_UP_FROM_RIGHT:
break;
}
return WALK_PRED_DEFAULT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 65 | 44.22% | 1 | 16.67% |
Steven Rostedt | 65 | 44.22% | 4 | 66.67% |
Tom Zanussi | 17 | 11.56% | 1 | 16.67% |
Total | 147 | 100.00% | 6 | 100.00% |
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
struct filter_pred *preds;
struct filter_pred *root;
struct filter_match_preds_data data = {
/* match is currently meaningless */
.match = -1,
.rec = rec,
};
int n_preds, ret;
/* no filter is considered a match */
if (!filter)
return 1;
n_preds = filter->n_preds;
if (!n_preds)
return 1;
/*
* n_preds, root and filter->preds are protected by preemption being disabled.
*/
root = rcu_dereference_sched(filter->root);
if (!root)
return 1;
data.preds = preds = rcu_dereference_sched(filter->preds);
ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
WARN_ON(ret);
return data.match;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 98 | 78.40% | 1 | 25.00% |
Steven Rostedt | 17 | 13.60% | 2 | 50.00% |
Tom Zanussi | 10 | 8.00% | 1 | 25.00% |
Total | 125 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(filter_match_preds);
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
ps->lasterr = err;
ps->lasterr_pos = pos;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
static void remove_filter_string(struct event_filter *filter)
{
if (!filter)
return;
kfree(filter->filter_string);
filter->filter_string = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 24 | 80.00% | 1 | 50.00% |
Steven Rostedt | 6 | 20.00% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
static int replace_filter_string(struct event_filter *filter,
char *filter_string)
{
kfree(filter->filter_string);
filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
if (!filter->filter_string)
return -ENOMEM;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 47 | 100.00% | 1 | 100.00% |
Total | 47 | 100.00% | 1 | 100.00% |
static int append_filter_string(struct event_filter *filter,
char *string)
{
int newlen;
char *new_filter_string;
BUG_ON(!filter->filter_string);
newlen = strlen(filter->filter_string) + strlen(string) + 1;
new_filter_string = kmalloc(newlen, GFP_KERNEL);
if (!new_filter_string)
return -ENOMEM;
strcpy(new_filter_string, filter->filter_string);
strcat(new_filter_string, string);
kfree(filter->filter_string);
filter->filter_string = new_filter_string;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 96 | 100.00% | 1 | 100.00% |
Total | 96 | 100.00% | 1 | 100.00% |
static void append_filter_err(struct filter_parse_state *ps,
struct event_filter *filter)
{
int pos = ps->lasterr_pos;
char *buf, *pbuf;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return;
append_filter_string(filter, "\n");
memset(buf, ' ', PAGE_SIZE);
if (pos > PAGE_SIZE - 128)
pos = 0;
buf[pos] = '^';
pbuf = &buf[pos] + 1;
sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
append_filter_string(filter, buf);
free_page((unsigned long) buf);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 121 | 99.18% | 1 | 50.00% |
Michal Hocko | 1 | 0.82% | 1 | 50.00% |
Total | 122 | 100.00% | 2 | 100.00% |
static inline struct event_filter *event_filter(struct trace_event_file *file)
{
return file->filter;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 18 | 94.74% | 1 | 50.00% |
Steven Rostedt | 1 | 5.26% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
/* caller must hold event_mutex */
void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
{
struct event_filter *filter = event_filter(file);
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_puts(s, "none\n");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 46 | 88.46% | 2 | 33.33% |
Oleg Nesterov | 2 | 3.85% | 1 | 16.67% |
Li Zefan | 2 | 3.85% | 1 | 16.67% |
Jovi Zhangwei | 1 | 1.92% | 1 | 16.67% |
Steven Rostedt | 1 | 1.92% | 1 | 16.67% |
Total | 52 | 100.00% | 6 | 100.00% |
void print_subsystem_event_filter(struct event_subsystem *system,
struct trace_seq *s)
{
struct event_filter *filter;
mutex_lock(&event_mutex);
filter = system->filter;
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
mutex_unlock(&event_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 53 | 80.30% | 1 | 16.67% |
Steven Rostedt | 8 | 12.12% | 2 | 33.33% |
Li Zefan | 4 | 6.06% | 2 | 33.33% |
Jovi Zhangwei | 1 | 1.52% | 1 | 16.67% |
Total | 66 | 100.00% | 6 | 100.00% |
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
if (!stack->preds)
return -ENOMEM;
stack->index = n_preds;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 50 | 90.91% | 1 | 50.00% |
Thomas Meyer | 5 | 9.09% | 1 | 50.00% |
Total | 55 | 100.00% | 2 | 100.00% |
static void __free_pred_stack(struct pred_stack *stack)
{
kfree(stack->preds);
stack->index = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static int __push_pred_stack(struct pred_stack *stack,
struct filter_pred *pred)
{
int index = stack->index;
if (WARN_ON(index == 0))
return -ENOSPC;
stack->preds[--index] = pred;
stack->index = index;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 55 | 100.00% | 1 | 100.00% |
Total | 55 | 100.00% | 1 | 100.00% |
static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
struct filter_pred *pred;
int index = stack->index;
pred = stack->preds[index++];
if (!pred)
return NULL;
stack->index = index;
return pred;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 52 | 100.00% | 1 | 100.00% |
Total | 52 | 100.00% | 1 | 100.00% |
static int filter_set_pred(struct event_filter *filter,
int idx,
struct pred_stack *stack,
struct filter_pred *src)
{
struct filter_pred *dest = &filter->preds[idx];
struct filter_pred *left;
struct filter_pred *right;
*dest = *src;
dest->index = idx;
if (dest->op == OP_OR || dest->op == OP_AND) {
right = __pop_pred_stack(stack);
left = __pop_pred_stack(stack);
if (!left || !right)
return -EINVAL;
/*
* If both children can be folded
* and they are the same op as this op or a leaf,
* then this op can be folded.
*/
if (left->index & FILTER_PRED_FOLD &&
((left->op == dest->op && !left->not) ||
left->left == FILTER_PRED_INVALID) &&
right->index & FILTER_PRED_FOLD &&
((right->op == dest->op && !right->not) ||
right->left == FILTER_PRED_INVALID))
dest->index |= FILTER_PRED_FOLD;
dest->left = left->index & ~FILTER_PRED_FOLD;
dest->right = right->index & ~FILTER_PRED_FOLD;
left->parent = dest->index & ~FILTER_PRED_FOLD;
right->parent = dest->index | FILTER_PRED_IS_RIGHT;
} else {
/*
* Make dest->left invalid to be used as a quick
* way to know this is a leaf node.
*/
dest->left = FILTER_PRED_INVALID;
/* All leafs allow folding the parent ops. */
dest->index |= FILTER_PRED_FOLD;
}
return __push_pred_stack(stack, dest);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 215 | 91.10% | 3 | 75.00% |
Tom Zanussi | 21 | 8.90% | 1 | 25.00% |
Total | 236 | 100.00% | 4 | 100.00% |
static void __free_preds(struct event_filter *filter)
{
int i;
if (filter->preds) {
for (i = 0; i < filter->n_preds; i++)
kfree(filter->preds[i].ops);
kfree(filter->preds);
filter->preds = NULL;
}
filter->a_preds = 0;
filter->n_preds = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 74 | 100.00% | 2 | 100.00% |
Total | 74 | 100.00% | 2 | 100.00% |
static void filter_disable(struct trace_event_file *file)
{
unsigned long old_flags = file->flags;
file->flags &= ~EVENT_FILE_FL_FILTERED;
if (old_flags != file->flags)
trace_buffered_event_disable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 21 | 56.76% | 3 | 75.00% |
Tom Zanussi | 16 | 43.24% | 1 | 25.00% |
Total | 37 | 100.00% | 4 | 100.00% |
static void __free_filter(struct event_filter *filter)
{
if (!filter)
return;
__free_preds(filter);
kfree(filter->filter_string);
kfree(filter);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 32 | 94.12% | 4 | 80.00% |
Steven Rostedt | 2 | 5.88% | 1 | 20.00% |
Total | 34 | 100.00% | 5 | 100.00% |
void free_event_filter(struct event_filter *filter)
{
__free_filter(filter);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
static struct event_filter *__alloc_filter(void)
{
struct event_filter *filter;
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
return filter;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 22 | 70.97% | 1 | 25.00% |
Steven Rostedt | 5 | 16.13% | 1 | 25.00% |
Li Zefan | 4 | 12.90% | 2 | 50.00% |
Total | 31 | 100.00% | 4 | 100.00% |
static int __alloc_preds(struct event_filter *filter, int n_preds)
{
struct filter_pred *pred;
int i;
if (filter->preds)
__free_preds(filter);
filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
if (!filter->preds)
return -ENOMEM;
filter->a_preds = n_preds;
filter->n_preds = 0;
for (i = 0; i < n_preds; i++) {
pred = &filter->preds[i];
pred->fn = filter_pred_none;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 62 | 56.88% | 3 | 50.00% |
Tom Zanussi | 43 | 39.45% | 1 | 16.67% |
Thomas Meyer | 3 | 2.75% | 1 | 16.67% |
Li Zefan | 1 | 0.92% | 1 | 16.67% |
Total | 109 | 100.00% | 6 | 100.00% |
static inline void __remove_filter(struct trace_event_file *file)
{
filter_disable(file);
remove_filter_string(file->filter);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 23 | 95.83% | 1 | 50.00% |
Steven Rostedt | 1 | 4.17% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
struct trace_array *tr)
{
struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list) {
if (file->system != dir)
continue;
__remove_filter(file);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 21 | 46.67% | 1 | 12.50% |
Tom Zanussi | 16 | 35.56% | 2 | 25.00% |
Steven Rostedt | 5 | 11.11% | 4 | 50.00% |
Oleg Nesterov | 3 | 6.67% | 1 | 12.50% |
Total | 45 | 100.00% | 8 | 100.00% |
static inline void __free_subsystem_filter(struct trace_event_file *file)
{
__free_filter(file->filter);
file->filter = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 23 | 92.00% | 1 | 33.33% |
Li Zefan | 1 | 4.00% | 1 | 33.33% |
Steven Rostedt | 1 | 4.00% | 1 | 33.33% |
Total | 25 | 100.00% | 3 | 100.00% |
static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
struct trace_array *tr)
{
struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list) {
if (file->system != dir)
continue;
__free_subsystem_filter(file);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 34 | 75.56% | 2 | 25.00% |
Li Zefan | 4 | 8.89% | 1 | 12.50% |
Steven Rostedt | 4 | 8.89% | 4 | 50.00% |
Oleg Nesterov | 3 | 6.67% | 1 | 12.50% |
Total | 45 | 100.00% | 8 | 100.00% |
static int filter_add_pred(struct filter_parse_state *ps,
struct event_filter *filter,
struct filter_pred *pred,
struct pred_stack *stack)
{
int err;
if (WARN_ON(filter->n_preds == filter->a_preds)) {
parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
return -ENOSPC;
}
err = filter_set_pred(filter, filter->n_preds, stack, pred);
if (err)
return err;
filter->n_preds++;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 65 | 74.71% | 1 | 16.67% |
Steven Rostedt | 13 | 14.94% | 2 | 33.33% |
Li Zefan | 5 | 5.75% | 1 | 16.67% |
Jiri Olsa | 4 | 4.60% | 2 | 33.33% |
Total | 87 | 100.00% | 6 | 100.00% |
int filter_assign_type(const char *type)
{
if (strstr(type, "__data_loc") && strstr(type, "char"))
return FILTER_DYN_STRING;
if (strchr(type, '[') && strstr(type, "char"))
return FILTER_STATIC_STRING;
return FILTER_OTHER;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 25 | 49.02% | 1 | 25.00% |
Li Zefan | 17 | 33.33% | 2 | 50.00% |
Frédéric Weisbecker | 9 | 17.65% | 1 | 25.00% |
Total | 51 | 100.00% | 4 | 100.00% |
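For reference, filter_assign_type() keys off the field's type string: a field declared as "__data_loc char[] msg" yields FILTER_DYN_STRING, "char comm[16]" yields FILTER_STATIC_STRING, and anything else (e.g. "pid_t pid") falls through to FILTER_OTHER. A short sketch of the same checks with local stand-in constants (the sample type strings are illustrative):

#include <stdio.h>
#include <string.h>

/* Local stand-ins for the filter type constants, for illustration only. */
enum { FILTER_OTHER, FILTER_STATIC_STRING, FILTER_DYN_STRING };

static int assign_type(const char *type)
{
	if (strstr(type, "__data_loc") && strstr(type, "char"))
		return FILTER_DYN_STRING;
	if (strchr(type, '[') && strstr(type, "char"))
		return FILTER_STATIC_STRING;
	return FILTER_OTHER;
}

int main(void)
{
	printf("%d\n", assign_type("__data_loc char[] msg")); /* 2: dynamic string */
	printf("%d\n", assign_type("char comm[16]"));          /* 1: static string  */
	printf("%d\n", assign_type("pid_t pid"));              /* 0: other          */
	return 0;
}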
static bool is_legal_op(struct ftrace_event_field *field, enum filter_op_ids op)
{
if (is_string_field(field) &&
(op != OP_EQ && op != OP_NE && op != OP_GLOB))
return false;
if (!is_string_field(field) && op == OP_GLOB)
return false;
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 33 | 57.89% | 1 | 25.00% |
Li Zefan | 18 | 31.58% | 1 | 25.00% |
Yaowei Bai | 4 | 7.02% | 1 | 25.00% |
Steven Rostedt | 2 | 3.51% | 1 | 25.00% |
Total | 57 | 100.00% | 4 | 100.00% |
static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op,
int field_size, int field_is_signed)
{
filter_pred_fn_t fn = NULL;
switch (field_size) {
case 8:
if (op == OP_EQ || op == OP_NE)
fn = filter_pred_64;
else if (field_is_signed)
fn = pred_funcs_s64[op - PRED_FUNC_START];
else
fn = pred_funcs_u64[op - PRED_FUNC_START];
break;
case 4:
if (op == OP_EQ || op == OP_NE)
fn = filter_pred_32;
else if (field_is_signed)
fn = pred_funcs_s32[op - PRED_FUNC_START];
else
fn = pred_funcs_u32[op - PRED_FUNC_START];
break;
case 2:
if (op == OP_EQ || op == OP_NE)
fn = filter_pred_16;
else if (field_is_signed)
fn = pred_funcs_s16[op - PRED_FUNC_START];
else
fn = pred_funcs_u16[op - PRED_FUNC_START];
break;
case 1:
if (op == OP_EQ || op == OP_NE)
fn = filter_pred_8;
else if (field_is_signed)
fn = pred_funcs_s8[op - PRED_FUNC_START];
else
fn = pred_funcs_u8[op - PRED_FUNC_START];
break;
}
return fn;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 148 | 74.75% | 2 | 50.00% |
Steven Rostedt | 50 | 25.25% | 2 | 50.00% |
Total | 198 | 100.00% | 4 | 100.00% |
static int init_pred(struct filter_parse_state *ps,
struct ftrace_event_field *field,
struct filter_pred *pred)
{
filter_pred_fn_t fn = filter_pred_none;
unsigned long long val;
int ret;
pred->offset = field->offset;
if (!is_legal_op(field, pred->op)) {
parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
return -EINVAL;
}
if (field->filter_type == FILTER_COMM) {
filter_build_regex(pred);
fn = filter_pred_comm;
pred->regex.field_len = TASK_COMM_LEN;
} else if (is_string_field(field)) {
filter_build_regex(pred);
if (field->filter_type == FILTER_STATIC_STRING) {
fn = filter_pred_string;
pred->regex.field_len = field->size;
} else if (field->filter_type == FILTER_DYN_STRING)
fn = filter_pred_strloc;
else
fn = filter_pred_pchar;
} else if (is_function_field(field)) {
if (strcmp(field->name, "ip")) {
parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
return -EINVAL;
}
} else {
if (field->is_signed)
ret = kstrtoll(pred->regex.pattern, 0, &val);
else
ret = kstrtoull(pred->regex.pattern, 0, &val);
if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL;
}
pred->val = val;
if (field->filter_type == FILTER_CPU)
fn = filter_pred_cpu;
else
fn = select_comparison_fn(pred->op, field->size,
field->is_signed);
if (!fn) {
parse_error(ps, FILT_ERR_INVALID_OP, 0);
return -EINVAL;
}
}
if (pred->op == OP_NE)
pred->not ^= 1;
pred->fn = fn;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 154 | 46.81% | 2 | 12.50% |
Li Zefan | 50 | 15.20% | 4 | 25.00% |
Jiri Olsa | 46 | 13.98% | 4 | 25.00% |
Frédéric Weisbecker | 30 | 9.12% | 2 | 12.50% |
Daniel Wagner | 26 | 7.90% | 1 | 6.25% |
Steven Rostedt | 21 | 6.38% | 2 | 12.50% |
Daniel Walter | 2 | 0.61% | 1 | 6.25% |
Total | 329 | 100.00% | 16 | 100.00% |
static void parse_init(struct filter_parse_state *ps,
struct filter_op *ops,
char *infix_string)
{
memset(ps, '\0', sizeof(*ps));
ps->infix.string = infix_string;
ps->infix.cnt = strlen(infix_string);
ps->ops = ops;
INIT_LIST_HEAD(&ps->opstack);
INIT_LIST_HEAD(&ps->postfix);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 74 | 100.00% | 2 | 100.00% |
Total | 74 | 100.00% | 2 | 100.00% |
static char infix_next(struct filter_parse_state *ps)
{
if (!ps->infix.cnt)
return 0;
ps->infix.cnt--;
return ps->infix.string[ps->infix.tail++];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 33 | 73.33% | 2 | 66.67% |
Steven Rostedt | 12 | 26.67% | 1 | 33.33% |
Total | 45 | 100.00% | 3 | 100.00% |
static char infix_peek(struct filter_parse_state *ps)
{
if (ps->infix.tail == strlen(ps->infix.string))
return 0;
return ps->infix.string[ps->infix.tail];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 45 | 100.00% | 2 | 100.00% |
Total | 45 | 100.00% | 2 | 100.00% |
static void infix_advance(struct filter_parse_state *ps)
{
if (!ps->infix.cnt)
return;
ps->infix.cnt--;
ps->infix.tail++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 25 | 71.43% | 2 | 66.67% |
Steven Rostedt | 10 | 28.57% | 1 | 33.33% |
Total | 35 | 100.00% | 3 | 100.00% |
static inline int is_precedence_lower(struct filter_parse_state *ps,
int a, int b)
{
return ps->ops[a].precedence < ps->ops[b].precedence;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 37 | 100.00% | 2 | 100.00% |
Total | 37 | 100.00% | 2 | 100.00% |
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
int i;
for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
if (ps->ops[i].string[0] == c)
return 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 65 | 100.00% | 2 | 100.00% |
Total | 65 | 100.00% | 2 | 100.00% |
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
char nextc = infix_peek(ps);
char opstr[3];
int i;
opstr[0] = firstc;
opstr[1] = nextc;
opstr[2] = '\0';
for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
if (!strcmp(opstr, ps->ops[i].string)) {
infix_advance(ps);
return ps->ops[i].id;
}
}
opstr[1] = '\0';
for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
if (!strcmp(opstr, ps->ops[i].string))
return ps->ops[i].id;
}
return OP_NONE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 173 | 100.00% | 5 | 100.00% |
Total | 173 | 100.00% | 5 | 100.00% |
static inline void clear_operand_string(struct filter_parse_state *ps)
{
memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
ps->operand.tail = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 33 | 100.00% | 2 | 100.00% |
Total | 33 | 100.00% | 2 | 100.00% |
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
return -EINVAL;
ps->operand.string[ps->operand.tail++] = c;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 48 | 96.00% | 2 | 66.67% |
Li Zefan | 2 | 4.00% | 1 | 33.33% |
Total | 50 | 100.00% | 3 | 100.00% |
static int filter_opstack_push(struct filter_parse_state *ps,
enum filter_op_ids op)
{
struct opstack_op *opstack_op;
opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
if (!opstack_op)
return -ENOMEM;
opstack_op->op = op;
list_add(&opstack_op->list, &ps->opstack);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 62 | 96.88% | 3 | 75.00% |
Steven Rostedt | 2 | 3.12% | 1 | 25.00% |
Total | 64 | 100.00% | 4 | 100.00% |
static int filter_opstack_empty(struct filter_parse_state *ps)
{
return list_empty(&ps->opstack);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 20 | 100.00% | 3 | 100.00% |
Total | 20 | 100.00% | 3 | 100.00% |
static int filter_opstack_top(struct filter_parse_state *ps)
{
struct opstack_op *opstack_op;
if (filter_opstack_empty(ps))
return OP_NONE;
opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
return opstack_op->op;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 46 | 100.00% | 3 | 100.00% |
Total | 46 | 100.00% | 3 | 100.00% |
static int filter_opstack_pop(struct filter_parse_state *ps)
{
struct opstack_op *opstack_op;
enum filter_op_ids op;
if (filter_opstack_empty(ps))
return OP_NONE;
opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
op = opstack_op->op;
list_del(&opstack_op->list);
kfree(opstack_op);
return op;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 65 | 97.01% | 5 | 83.33% |
Steven Rostedt | 2 | 2.99% | 1 | 16.67% |
Total | 67 | 100.00% | 6 | 100.00% |
static void filter_opstack_clear(struct filter_parse_state *ps)
{
while (!filter_opstack_empty(ps))
filter_opstack_pop(ps);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 24 | 100.00% | 2 | 100.00% |
Total | 24 | 100.00% | 2 | 100.00% |
static char *curr_operand(struct filter_parse_state *ps)
{
return ps->operand.string;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 19 | 100.00% | 2 | 100.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
{
struct postfix_elt *elt;
elt = kmalloc(sizeof(*elt), GFP_KERNEL);
if (!elt)
return -ENOMEM;
elt->op = OP_NONE;
elt->operand = kstrdup(operand, GFP_KERNEL);
if (!elt->operand) {
kfree(elt);
return -ENOMEM;
}
list_add_tail(&elt->list, &ps->postfix);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 93 | 100.00% | 4 | 100.00% |
Total | 93 | 100.00% | 4 | 100.00% |
static int postfix_append_op(struct filter_parse_state *ps, enum filter_op_ids op)
{
struct postfix_elt *elt;
elt = kmalloc(sizeof(*elt), GFP_KERNEL);
if (!elt)
return -ENOMEM;
elt->op = op;
elt->operand = NULL;
list_add_tail(&elt->list, &ps->postfix);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 68 | 97.14% | 3 | 75.00% |
Steven Rostedt | 2 | 2.86% | 1 | 25.00% |
Total | 70 | 100.00% | 4 | 100.00% |
static void postfix_clear(struct filter_parse_state *ps)
{
struct postfix_elt *elt;
while (!list_empty(&ps->postfix)) {
elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
list_del(&elt->list);
kfree(elt->operand);
kfree(elt);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 55 | 85.94% | 3 | 75.00% |
Li Zefan | 9 | 14.06% | 1 | 25.00% |
Total | 64 | 100.00% | 4 | 100.00% |
static int filter_parse(struct filter_parse_state *ps)
{
enum filter_op_ids op, top_op;
int in_string = 0;
char ch;
while ((ch = infix_next(ps))) {
if (ch == '"') {
in_string ^= 1;
continue;
}
if (in_string)
goto parse_operand;
if (isspace(ch))
continue;
if (is_op_char(ps, ch)) {
op = infix_get_op(ps, ch);
if (op == OP_NONE) {
parse_error(ps, FILT_ERR_INVALID_OP, 0);
return -EINVAL;
}
if (strlen(curr_operand(ps))) {
postfix_append_operand(ps, curr_operand(ps));
clear_operand_string(ps);
}
while (!filter_opstack_empty(ps)) {
top_op = filter_opstack_top(ps);
if (!is_precedence_lower(ps, top_op, op)) {
top_op = filter_opstack_pop(ps);
postfix_append_op(ps, top_op);
continue;
}
break;
}
filter_opstack_push(ps, op);
continue;
}
if (ch == '(') {
filter_opstack_push(ps, OP_OPEN_PAREN);
continue;
}
if (ch == ')') {
if (strlen(curr_operand(ps))) {
postfix_append_operand(ps, curr_operand(ps));
clear_operand_string(ps);
}
top_op = filter_opstack_pop(ps);
while (top_op != OP_NONE) {
if (top_op == OP_OPEN_PAREN)
break;
postfix_append_op(ps, top_op);
top_op = filter_opstack_pop(ps);
}
if (top_op == OP_NONE) {
parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
return -EINVAL;
}
continue;
}
parse_operand:
if (append_operand_char(ps, ch)) {
parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
return -EINVAL;
}
}
if (strlen(curr_operand(ps)))
postfix_append_operand(ps, curr_operand(ps));
while (!filter_opstack_empty(ps)) {
top_op = filter_opstack_pop(ps);
if (top_op == OP_NONE)
break;
if (top_op == OP_OPEN_PAREN) {
parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
return -EINVAL;
}
postfix_append_op(ps, top_op);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 366 | 91.73% | 6 | 66.67% |
Frédéric Weisbecker | 26 | 6.52% | 1 | 11.11% |
Steven Rostedt | 7 | 1.75% | 2 | 22.22% |
Total | 399 | 100.00% | 9 | 100.00% |
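filter_parse() is a small shunting-yard pass: operands go straight to the postfix list, while operators wait on the opstack until an operator of lower precedence or a closing parenthesis forces them out. For example, assuming a filter string like "prio < 10 && (comm == bash || pid == 1)" (field names chosen for illustration), the postfix list comes out as: prio 10 < comm bash == pid 1 == || &&. A compact standalone sketch of the same precedence-driven conversion, restricted to whitespace-separated tokens and a handful of operators:

#include <stdio.h>
#include <string.h>

/* Toy shunting-yard mirroring the precedence handling in filter_parse();
 * illustrative only, not the kernel parser. */
static int prec(const char *op)
{
	if (!strcmp(op, "||")) return 1;
	if (!strcmp(op, "&&")) return 2;
	if (!strcmp(op, "==")) return 4;
	if (!strcmp(op, "<"))  return 5;
	return 0;                      /* '(' and operands */
}

int main(void)
{
	const char *tok[] = { "prio", "<", "10", "&&", "(", "comm", "==",
			      "bash", "||", "pid", "==", "1", ")" };
	const char *stack[16];
	int top = 0;

	for (unsigned i = 0; i < sizeof(tok) / sizeof(tok[0]); i++) {
		const char *t = tok[i];

		if (!strcmp(t, "(")) {
			stack[top++] = t;
		} else if (!strcmp(t, ")")) {
			while (top && strcmp(stack[top - 1], "("))
				printf("%s ", stack[--top]);
			top--;                         /* drop the '(' */
		} else if (prec(t) > 0) {
			/* pop while the stacked operator has equal or higher precedence */
			while (top && prec(stack[top - 1]) >= prec(t))
				printf("%s ", stack[--top]);
			stack[top++] = t;
		} else {
			printf("%s ", t);              /* operand goes straight out */
		}
	}
	while (top)
		printf("%s ", stack[--top]);
	printf("\n");  /* prints: prio 10 < comm bash == pid 1 == || && */
	return 0;
}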
static struct filter_pred *create_pred(struct filter_parse_state *ps,
struct trace_event_call *call,
enum filter_op_ids op,
char *operand1, char *operand2)
{
struct ftrace_event_field *field;
static struct filter_pred pred;
memset(&pred, 0, sizeof(pred));
pred.op = op;
if (op == OP_AND || op == OP_OR)
return &pred;
if (!operand1 || !operand2) {
parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
return NULL;
}
field = trace_find_event_field(call, operand1);
if (!field) {
parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
return NULL;
}
strcpy(pred.regex.pattern, operand2);
pred.regex.len = strlen(pred.regex.pattern);
pred.field = field;
return init_pred(ps, field, &pred) ? NULL : &pred;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 93 | 54.39% | 4 | 28.57% |
Tom Zanussi | 63 | 36.84% | 5 | 35.71% |
Frédéric Weisbecker | 10 | 5.85% | 1 | 7.14% |
Steven Rostedt | 3 | 1.75% | 2 | 14.29% |
Jovi Zhangwei | 1 | 0.58% | 1 | 7.14% |
Li Zefan | 1 | 0.58% | 1 | 7.14% |
Total | 171 | 100.00% | 14 | 100.00% |
static int check_preds(struct filter_parse_state *ps)
{
int n_normal_preds = 0, n_logical_preds = 0;
struct postfix_elt *elt;
int cnt = 0;
list_for_each_entry(elt, &ps->postfix, list) {
if (elt->op == OP_NONE) {
cnt++;
continue;
}
if (elt->op == OP_AND || elt->op == OP_OR) {
n_logical_preds++;
cnt--;
continue;
}
if (elt->op != OP_NOT)
cnt--;
n_normal_preds++;
/* all ops should have operands */
if (cnt < 0)
break;
}
if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
return -EINVAL;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 94 | 72.31% | 2 | 50.00% |
Steven Rostedt | 36 | 27.69% | 2 | 50.00% |
Total | 130 | 100.00% | 4 | 100.00% |
static int count_preds(struct filter_parse_state *ps)
{
struct postfix_elt *elt;
int n_preds = 0;
list_for_each_entry(elt, &ps->postfix, list) {
if (elt->op == OP_NONE)
continue;
n_preds++;
}
return n_preds;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 46 | 100.00% | 1 | 100.00% |
Total | 46 | 100.00% | 1 | 100.00% |
struct check_pred_data {
int count;
int max;
};
static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
int *err, void *data)
{
struct check_pred_data *d = data;
if (WARN_ON(d->count++ > d->max)) {
*err = -EINVAL;
return WALK_PRED_ABORT;
}
return WALK_PRED_DEFAULT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 32 | 55.17% | 1 | 50.00% |
Steven Rostedt | 26 | 44.83% | 1 | 50.00% |
Total | 58 | 100.00% | 2 | 100.00% |
/*
* The tree is walked at filtering of an event. If the tree is not correctly
* built, it may cause an infinite loop. Check here that the tree does
* indeed terminate.
*/
static int check_pred_tree(struct event_filter *filter,
struct filter_pred *root)
{
struct check_pred_data data = {
/*
* The max that we can hit a node is three times.
* Once going down, once coming up from left, and
* once coming up from right. This is more than enough
* since leafs are only hit a single time.
*/
.max = 3 * filter->n_preds,
.count = 0,
};
return walk_pred_tree(filter->preds, root,
check_pred_tree_cb, &data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 37 | 71.15% | 1 | 50.00% |
Steven Rostedt | 15 | 28.85% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
int *err, void *data)
{
int *count = data;
if ((move == MOVE_DOWN) &&
(pred->left == FILTER_PRED_INVALID))
(*count)++;
return WALK_PRED_DEFAULT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 29 | 53.70% | 1 | 14.29% |
Steven Rostedt | 14 | 25.93% | 2 | 28.57% |
Tom Zanussi | 8 | 14.81% | 2 | 28.57% |
Li Zefan | 3 | 5.56% | 2 | 28.57% |
Total | 54 | 100.00% | 7 | 100.00% |
static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
{
int count = 0, ret;
ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
WARN_ON(ret);
return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 29 | 64.44% | 1 | 50.00% |
Steven Rostedt | 16 | 35.56% | 1 | 50.00% |
Total | 45 | 100.00% | 2 | 100.00% |
struct fold_pred_data {
struct filter_pred *root;
int count;
int children;
};
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
int *err, void *data)
{
struct fold_pred_data *d = data;
struct filter_pred *root = d->root;
if (move != MOVE_DOWN)
return WALK_PRED_DEFAULT;
if (pred->left != FILTER_PRED_INVALID)
return WALK_PRED_DEFAULT;
if (WARN_ON(d->count == d->children)) {
*err = -EINVAL;
return WALK_PRED_ABORT;
}
pred->index &= ~FILTER_PRED_FOLD;
root->ops[d->count++] = pred->index;
return WALK_PRED_DEFAULT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 107 | 100.00% | 1 | 100.00% |
Total | 107 | 100.00% | 1 | 100.00% |
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
struct fold_pred_data data = {
.root = root,
.count = 0,
};
int children;
/* No need to keep the fold flag */
root->index &= ~FILTER_PRED_FOLD;
/* If the root is a leaf then do nothing */
if (root->left == FILTER_PRED_INVALID)
return 0;
/* count the children */
children = count_leafs(preds, &preds[root->left]);
children += count_leafs(preds, &preds[root->right]);
root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
if (!root->ops)
return -ENOMEM;
root->val = children;
data.children = children;
return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 120 | 85.11% | 1 | 33.33% |
Jiri Olsa | 18 | 12.77% | 1 | 33.33% |
Thomas Meyer | 3 | 2.13% | 1 | 33.33% |
Total | 141 | 100.00% | 3 | 100.00% |
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
int *err, void *data)
{
struct filter_pred *preds = data;
if (move != MOVE_DOWN)
return WALK_PRED_DEFAULT;
if (!(pred->index & FILTER_PRED_FOLD))
return WALK_PRED_DEFAULT;
*err = fold_pred(preds, pred);
if (*err)
return WALK_PRED_ABORT;
/* everything below is folded, continue with parent */
return WALK_PRED_PARENT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 44 | 58.67% | 1 | 50.00% |
Jiri Olsa | 31 | 41.33% | 1 | 50.00% |
Total | 75 | 100.00% | 2 | 100.00% |
/*
* To optimize the processing of the ops, if we have several "ors" or
* "ands" together, we can put them in an array and process them all
* together speeding up the filter logic.
*/
static int fold_pred_tree(struct event_filter *filter,
struct filter_pred *root)
{
return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
filter->preds);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 21 | 65.62% | 1 | 50.00% |
Steven Rostedt | 11 | 34.38% | 1 | 50.00% |
Total | 32 | 100.00% | 2 | 100.00% |
static int replace_preds(struct trace_event_call *call,
struct event_filter *filter,
struct filter_parse_state *ps,
bool dry_run)
{
char *operand1 = NULL, *operand2 = NULL;
struct filter_pred *pred;
struct filter_pred *root;
struct postfix_elt *elt;
struct pred_stack stack = { }; /* init to NULL */
int err;
int n_preds = 0;
n_preds = count_preds(ps);
if (n_preds >= MAX_FILTER_PRED) {
parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
return -ENOSPC;
}
err = check_preds(ps);
if (err)
return err;
if (!dry_run) {
err = __alloc_pred_stack(&stack, n_preds);
if (err)
return err;
err = __alloc_preds(filter, n_preds);
if (err)
goto fail;
}
n_preds = 0;
list_for_each_entry(elt, &ps->postfix, list) {
if (elt->op == OP_NONE) {
if (!operand1)
operand1 = elt->operand;
else if (!operand2)
operand2 = elt->operand;
else {
parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
err = -EINVAL;
goto fail;
}
continue;
}
if (elt->op == OP_NOT) {
if (!n_preds || operand1 || operand2) {
parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
err = -EINVAL;
goto fail;
}
if (!dry_run)
filter->preds[n_preds - 1].not ^= 1;
continue;
}
if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
err = -ENOSPC;
goto fail;
}
pred = create_pred(ps, call, elt->op, operand1, operand2);
if (!pred) {
err = -EINVAL;
goto fail;
}
if (!dry_run) {
err = filter_add_pred(ps, filter, pred, &stack);
if (err)
goto fail;
}
operand1 = operand2 = NULL;
}
if (!dry_run) {
/* We should have one item left on the stack */
pred = __pop_pred_stack(&stack);
if (!pred)
return -EINVAL;
/* This item is where we start from in matching */
root = pred;
/* Make sure the stack is empty */
pred = __pop_pred_stack(&stack);
if (WARN_ON(pred)) {
err = -EINVAL;
filter->root = NULL;
goto fail;
}
err = check_pred_tree(filter, root);
if (err)
goto fail;
/* Optimize the tree */
err = fold_pred_tree(filter, root);
if (err)
goto fail;
/* We don't set root until we know it works */
barrier();
filter->root = root;
}
err = 0;
fail:
__free_pred_stack(&stack);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 262 | 53.80% | 6 | 31.58% |
Tom Zanussi | 182 | 37.37% | 7 | 36.84% |
Li Zefan | 28 | 5.75% | 3 | 15.79% |
Jiri Olsa | 15 | 3.08% | 3 | 15.79% |
Total | 487 | 100.00% | 19 | 100.00% |
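The second pass in replace_preds() is a standard postfix-to-tree build: operands become leaf predicates and are pushed on the pred stack, and each AND/OR pops its two children, links them, and pushes itself back, so exactly one node (the root) should remain at the end. A standalone sketch of the same construction, with made-up leaf names standing in for already-built predicates:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy predicate node: either a leaf (name set) or an AND/OR with two children. */
struct node {
	const char *name;   /* non-NULL for leaves */
	const char *op;     /* "&&" or "||" for inner nodes */
	struct node *left, *right;
};

static struct node *mknode(const char *name, const char *op,
			   struct node *l, struct node *r)
{
	struct node *n = calloc(1, sizeof(*n));
	n->name = name;
	n->op = op;
	n->left = l;
	n->right = r;
	return n;
}

static void print_tree(const struct node *n)
{
	if (n->name) {
		printf("%s", n->name);
		return;
	}
	printf("(");
	print_tree(n->left);
	printf(" %s ", n->op);
	print_tree(n->right);
	printf(")");
}

int main(void)
{
	/* Postfix for "a && (b || c)": leaves a b c, then ||, then && */
	const char *postfix[] = { "a", "b", "c", "||", "&&" };
	struct node *stack[8];
	int top = 0;

	for (unsigned i = 0; i < sizeof(postfix) / sizeof(postfix[0]); i++) {
		const char *t = postfix[i];

		if (!strcmp(t, "&&") || !strcmp(t, "||")) {
			/* Like filter_set_pred(): pop right, then left, push the op node. */
			struct node *right = stack[--top];
			struct node *left = stack[--top];
			stack[top++] = mknode(NULL, t, left, right);
		} else {
			stack[top++] = mknode(t, NULL, NULL, NULL);
		}
	}

	/* Exactly one node, the root, should remain (cf. the stack checks above). */
	print_tree(stack[0]);
	printf("\n");  /* prints: (a && (b || c)) */
	return 0;
}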
static inline void event_set_filtered_flag(struct trace_event_file *file)
{
unsigned long old_flags = file->flags;
file->flags |= EVENT_FILE_FL_FILTERED;
if (old_flags != file->flags)
trace_buffered_event_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 21 | 56.76% | 3 | 75.00% |
Tom Zanussi | 16 | 43.24% | 1 | 25.00% |
Total | 37 | 100.00% | 4 | 100.00% |
static inline void event_set_filter(struct trace_event_file *file,
struct event_filter *filter)
{
rcu_assign_pointer(file->filter, filter);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 25 | 96.15% | 1 | 50.00% |
Steven Rostedt | 1 | 3.85% | 1 | 50.00% |
Total | 26 | 100.00% | 2 | 100.00% |
static inline void event_clear_filter(struct trace_event_file *file)
{
RCU_INIT_POINTER(file->filter, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 20 | 95.24% | 1 | 50.00% |
Steven Rostedt | 1 | 4.76% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
static inline void
event_set_no_set_filter_flag(struct trace_event_file *file)
{
file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 16 | 88.89% | 1 | 33.33% |
Steven Rostedt | 2 | 11.11% | 2 | 66.67% |
Total | 18 | 100.00% | 3 | 100.00% |
static inline void
event_clear_no_set_filter_flag(struct trace_event_file *file)
{
file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 14 | 73.68% | 1 | 25.00% |
Steven Rostedt | 5 | 26.32% | 3 | 75.00% |
Total | 19 | 100.00% | 4 | 100.00% |
static inline bool
event_no_set_filter_flag(struct trace_event_file *file)
{
if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 19 | 73.08% | 1 | 20.00% |
Steven Rostedt | 5 | 19.23% | 3 | 60.00% |
Li Zefan | 2 | 7.69% | 1 | 20.00% |
Total | 26 | 100.00% | 5 | 100.00% |
struct filter_list {
struct list_head list;
struct event_filter *filter;
};
static int replace_system_preds(struct trace_subsystem_dir *dir,
struct trace_array *tr,
struct filter_parse_state *ps,
char *filter_string)
{
struct trace_event_file *file;
struct filter_list *filter_item;
struct filter_list *tmp;
LIST_HEAD(filter_list);
bool fail = true;
int err;
list_for_each_entry(file, &tr->events, list) {
if (file->system != dir)
continue;
/*
* Try to see if the filter can be applied
* (filter arg is ignored on dry_run)
*/
err = replace_preds(file->event_call, NULL, ps, true);
if (err)
event_set_no_set_filter_flag(file);
else
event_clear_no_set_filter_flag(file);
}
list_for_each_entry(file, &tr->events, list) {
struct event_filter *filter;
if (file->system != dir)
continue;
if (event_no_set_filter_flag(file))
continue;
filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
if (!filter_item)
goto fail_mem;
list_add_tail(&filter_item->list, &filter_list);
filter_item->filter = __alloc_filter();
if (!filter_item->filter)
goto fail_mem;
filter = filter_item->filter;
/* Can only fail on no memory */
err = replace_filter_string(filter, filter_string);
if (err)
goto fail_mem;
err = replace_preds(file->event_call, filter, ps, false);
if (err) {
filter_disable(file);
parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
append_filter_err(ps, filter);
} else
event_set_filtered_flag(file);
/*
* Regardless of whether this returned an error, we still
* replace the filter for the call.
*/
filter = event_filter(file);
event_set_filter(file, filter_item->filter);
filter_item->filter = filter;
fail = false;
}
if (fail)
goto fail;
/*
* The calls can still be using the old filters.
* Do a synchronize_sched() to ensure all calls are
* done with them before we free them.
*/
synchronize_sched();
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
__free_filter(filter_item->filter);
list_del(&filter_item->list);
kfree(filter_item);
}
return 0;
fail:
/* No call succeeded */
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
list_del(&filter_item->list);
kfree(filter_item);
}
parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
return -EINVAL;
fail_mem:
/* If any call succeeded, we still need to sync */
if (!fail)
synchronize_sched();
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
__free_filter(filter_item->filter);
list_del(&filter_item->list);
kfree(filter_item);
}
return -ENOMEM;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 262 | 62.98% | 5 | 29.41% |
Li Zefan | 84 | 20.19% | 3 | 17.65% |
Tom Zanussi | 48 | 11.54% | 5 | 29.41% |
Oleg Nesterov | 11 | 2.64% | 1 | 5.88% |
Ingo Molnar | 5 | 1.20% | 1 | 5.88% |
Tejun Heo | 3 | 0.72% | 1 | 5.88% |
Xiao Guangrong | 3 | 0.72% | 1 | 5.88% |
Total | 416 | 100.00% | 17 | 100.00% |
static int create_filter_start(char *filter_str, bool set_str,
struct filter_parse_state **psp,
struct event_filter **filterp)
{
struct event_filter *filter;
struct filter_parse_state *ps = NULL;
int err = 0;
WARN_ON_ONCE(*psp || *filterp);
/* allocate everything, and if any fails, free all and fail */
filter = __alloc_filter();
if (filter && set_str)
err = replace_filter_string(filter, filter_str);
ps = kzalloc(sizeof(*ps), GFP_KERNEL);
if (!filter || !ps || err) {
kfree(ps);
__free_filter(filter);
return -ENOMEM;
}
/* we're committed to creating a new filter */
*filterp = filter;
*psp = ps;
parse_init(ps, filter_ops, filter_str);
err = filter_parse(ps);
if (err && set_str)
append_filter_err(ps, filter);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 111 | 72.08% | 1 | 20.00% |
Tom Zanussi | 24 | 15.58% | 2 | 40.00% |
Steven Rostedt | 19 | 12.34% | 2 | 40.00% |
Total | 154 | 100.00% | 5 | 100.00% |
static void create_filter_finish(struct filter_parse_state *ps)
{
if (ps) {
filter_opstack_clear(ps);
postfix_clear(ps);
kfree(ps);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 31 | 96.88% | 2 | 66.67% |
Steven Rostedt | 1 | 3.12% | 1 | 33.33% |
Total | 32 | 100.00% | 3 | 100.00% |
/**
* create_filter - create a filter for a trace_event_call
* @call: trace_event_call to create a filter for
* @filter_str: filter string
* @set_str: remember @filter_str and enable detailed error in filter
* @filterp: out param for created filter (always updated on return)
*
* Creates a filter for @call with @filter_str. If @set_str is %true,
* @filter_str is copied and recorded in the new filter.
*
* On success, returns 0 and *@filterp points to the new filter. On
* failure, returns -errno and *@filterp may point to %NULL or to a new
* filter. In the latter case, the returned filter contains error
* information if @set_str is %true and the caller is responsible for
* freeing it.
*/
static int create_filter(struct trace_event_call *call,
char *filter_str, bool set_str,
struct event_filter **filterp)
{
struct event_filter *filter = NULL;
struct filter_parse_state *ps = NULL;
int err;
err = create_filter_start(filter_str, set_str, &ps, &filter);
if (!err) {
err = replace_preds(call, filter, ps, false);
if (err && set_str)
append_filter_err(ps, filter);
}
if (err && !set_str) {
free_event_filter(filter);
filter = NULL;
}
create_filter_finish(ps);
*filterp = filter;
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 91 | 75.83% | 2 | 25.00% |
Steven Rostedt | 24 | 20.00% | 4 | 50.00% |
Tom Zanussi | 5 | 4.17% | 2 | 25.00% |
Total | 120 | 100.00% | 8 | 100.00% |
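A minimal caller of create_filter(), following the ownership rules spelled out in the comment above (a hypothetical helper; the filter expression is only an example):

/* Sketch: build a filter on 'call' that drops events from PID 0. */
static int example_create_pid_filter(struct trace_event_call *call,
                                     struct event_filter **filterp)
{
        struct event_filter *filter = NULL;
        char expr[] = "common_pid != 0";        /* common_pid exists on every event */
        int err;

        err = create_filter(call, expr, true, &filter);
        if (err) {
                /*
                 * With set_str == true a filter may still have been allocated
                 * to carry the parse-error text; the caller must free it.
                 * free_event_filter()/__free_filter() tolerate NULL (see
                 * create_filter_start() above).
                 */
                free_event_filter(filter);
                return err;
        }
        *filterp = filter;
        return 0;
}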
int create_event_filter(struct trace_event_call *call,
char *filter_str, bool set_str,
struct event_filter **filterp)
{
return create_filter(call, filter_str, set_str, filterp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 34 | 97.14% | 1 | 50.00% |
Steven Rostedt | 1 | 2.86% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
/**
* create_system_filter - create a filter for an event_subsystem
* @dir: the trace_subsystem_dir to create the filter for
* @tr: the trace array the subsystem belongs to
* @filter_str: filter string
* @filterp: out param for created filter (always updated on return)
*
* Identical to create_filter() except that it creates a subsystem filter
* and always remembers @filter_str.
*/
static int create_system_filter(struct trace_subsystem_dir *dir,
struct trace_array *tr,
char *filter_str, struct event_filter **filterp)
{
struct event_filter *filter = NULL;
struct filter_parse_state *ps = NULL;
int err;
err = create_filter_start(filter_str, true, &ps, &filter);
if (!err) {
err = replace_system_preds(dir, tr, ps, filter_str);
if (!err) {
/* System filters just show a default message */
kfree(filter->filter_string);
filter->filter_string = NULL;
} else {
append_filter_err(ps, filter);
}
}
create_filter_finish(ps);
*filterp = filter;
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 103 | 84.43% | 1 | 14.29% |
Tom Zanussi | 13 | 10.66% | 3 | 42.86% |
Li Zefan | 3 | 2.46% | 1 | 14.29% |
Oleg Nesterov | 2 | 1.64% | 1 | 14.29% |
Steven Rostedt | 1 | 0.82% | 1 | 14.29% |
Total | 122 | 100.00% | 7 | 100.00% |
/* caller must hold event_mutex */
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
struct trace_event_call *call = file->event_call;
struct event_filter *filter;
int err;
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(file);
filter = event_filter(file);
if (!filter)
return 0;
event_clear_filter(file);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
return 0;
}
err = create_filter(call, filter_string, true, &filter);
/*
* Always swap the call's filter with the new filter,
* even if there was an error. If there was an error
* in the filter, we disable the filter and show the
* error string.
*/
if (filter) {
struct event_filter *tmp;
tmp = event_filter(file);
if (!err)
event_set_filtered_flag(file);
else
filter_disable(file);
event_set_filter(file, filter);
if (tmp) {
/* Make sure the call is done with the filter */
synchronize_sched();
__free_filter(tmp);
}
}
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 63 | 40.13% | 2 | 18.18% |
Tom Zanussi | 57 | 36.31% | 3 | 27.27% |
Steven Rostedt | 29 | 18.47% | 3 | 27.27% |
Oleg Nesterov | 6 | 3.82% | 1 | 9.09% |
Li Zefan | 2 | 1.27% | 2 | 18.18% |
Total | 157 | 100.00% | 11 | 100.00% |
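apply_event_filter() is what ultimately runs when user space writes to a per-event "filter" file in tracefs. A small user-space sketch of that interface (the event path is only an example; tracefs may also be mounted at /sys/kernel/debug/tracing):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path =
                "/sys/kernel/tracing/events/sched/sched_switch/filter";
        const char *expr = "prev_pid != 0";
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return 1;

        /* install a filter for the event ... */
        if (write(fd, expr, strlen(expr)) < 0)
                return 1;

        /* ... and later clear it again: writing "0" frees the filter */
        if (write(fd, "0", 1) < 0)
                return 1;

        close(fd);
        return 0;
}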
int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
char *filter_string)
{
struct event_subsystem *system = dir->subsystem;
struct trace_array *tr = dir->tr;
struct event_filter *filter;
int err = 0;
mutex_lock(&event_mutex);
/* Make sure the system still has events */
if (!dir->nr_events) {
err = -ENODEV;
goto out_unlock;
}
if (!strcmp(strstrip(filter_string), "0")) {
filter_free_subsystem_preds(dir, tr);
remove_filter_string(system->filter);
filter = system->filter;
system->filter = NULL;
/* Ensure all filters are no longer used */
synchronize_sched();
filter_free_subsystem_filters(dir, tr);
__free_filter(filter);
goto out_unlock;
}
err = create_system_filter(dir, tr, filter_string, &filter);
if (filter) {
/*
* No event actually uses the system filter, so we
* can free it without synchronize_sched().
*/
__free_filter(system->filter);
system->filter = filter;
}
out_unlock:
mutex_unlock(&event_mutex);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 82 | 47.95% | 4 | 28.57% |
Tom Zanussi | 67 | 39.18% | 4 | 28.57% |
Tejun Heo | 8 | 4.68% | 1 | 7.14% |
Li Zefan | 8 | 4.68% | 3 | 21.43% |
Ingo Molnar | 3 | 1.75% | 1 | 7.14% |
Oleg Nesterov | 3 | 1.75% | 1 | 7.14% |
Total | 171 | 100.00% | 14 | 100.00% |
#ifdef CONFIG_PERF_EVENTS
void ftrace_profile_free_filter(struct perf_event *event)
{
struct event_filter *filter = event->filter;
event->filter = NULL;
__free_filter(filter);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Li Zefan | 29 | 96.67% | 1 | 50.00% |
Steven Rostedt | 1 | 3.33% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
struct function_filter_data {
struct ftrace_ops *ops;
int first_filter;
int first_notrace;
};
#ifdef CONFIG_FUNCTION_TRACER
static char **
ftrace_function_filter_re(char *buf, int len, int *count)
{
char *str, **re;
str = kstrndup(buf, len, GFP_KERNEL);
if (!str)
return NULL;
/*
* The argv_split function takes white space
* as a separator, so convert ',' into spaces.
*/
strreplace(str, ',', ' ');
re = argv_split(GFP_KERNEL, str, count);
kfree(str);
return re;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 54 | 72.00% | 2 | 40.00% |
Li Zefan | 17 | 22.67% | 1 | 20.00% |
Rasmus Villemoes | 3 | 4.00% | 1 | 20.00% |
Ingo Molnar | 1 | 1.33% | 1 | 20.00% |
Total | 75 | 100.00% | 5 | 100.00% |
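ftrace_function_filter_re() turns a buffer such as "do_sys_open,vfs_read vfs_write" into the array { "do_sys_open", "vfs_read", "vfs_write" }: commas are rewritten to spaces by strreplace() and the result is split on whitespace by argv_split(). A user-space analogue of the same transformation, purely for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[] = "do_sys_open,vfs_read vfs_write";  /* example input */
        char *tok, *save = NULL;
        char *p;

        /* commas become spaces, mirroring strreplace(str, ',', ' ') */
        for (p = buf; *p; p++)
                if (*p == ',')
                        *p = ' ';

        /* split on whitespace, mirroring argv_split() */
        for (tok = strtok_r(buf, " ", &save); tok;
             tok = strtok_r(NULL, " ", &save))
                printf("regex: %s\n", tok);

        return 0;
}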
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
int reset, char *re, int len)
{
int ret;
if (filter)
ret = ftrace_set_filter(ops, re, len, reset);
else
ret = ftrace_set_notrace(ops, re, len, reset);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 50 | 81.97% | 1 | 33.33% |
Li Zefan | 10 | 16.39% | 1 | 33.33% |
Tejun Heo | 1 | 1.64% | 1 | 33.33% |
Total | 61 | 100.00% | 3 | 100.00% |
static int __ftrace_function_set_filter(int filter, char *buf, int len,
struct function_filter_data *data)
{
int i, re_cnt, ret = -EINVAL;
int *reset;
char **re;
reset = filter ? &data->first_filter : &data->first_notrace;
/*
* The 'ip' field can have multiple filters set, separated
* by spaces or commas. We first split the filter string and
* then apply each piece separately.
*/
re = ftrace_function_filter_re(buf, len, &re_cnt);
if (!re)
return -EINVAL;
for (i = 0; i < re_cnt; i++) {
ret = ftrace_function_set_regexp(data->ops, filter, *reset,
re[i], strlen(re[i]));
if (ret)
break;
if (*reset)
*reset = 0;
}
argv_free(re);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 137 | 97.16% | 2 | 66.67% |
Li Zefan | 4 | 2.84% | 1 | 33.33% |
Total | 141 | 100.00% | 3 | 100.00% |
static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
{
struct ftrace_event_field *field = pred->field;
if (leaf) {
/*
* Check the leaf predicate for function trace, verify:
* - only '==' and '!=' are used
* - the 'ip' field is used
*/
if ((pred->op != OP_EQ) && (pred->op != OP_NE))
return -EINVAL;
if (strcmp(field->name, "ip"))
return -EINVAL;
} else {
/*
* Check the non-leaf predicate for function trace, verify:
* - only '||' is used
*/
if (pred->op != OP_OR)
return -EINVAL;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 86 | 100.00% | 1 | 100.00% |
Total | 86 | 100.00% | 1 | 100.00% |
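In practice these checks restrict function-event filters to OR-combined equality tests on 'ip'. For example (the function names below are placeholders, not taken from this file):

/* accepted: leaves use '==' or '!=' on 'ip', inner nodes are '||' */
"ip == do_sys_open"
"ip == do_sys_open || ip != vfs_read"

/* rejected: '&&' as an inner node, or a field other than 'ip' */
"ip == do_sys_open && ip == vfs_read"
"common_pid == 1"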
static int ftrace_function_set_filter_cb(enum move_type move,
struct filter_pred *pred,
int *err, void *data)
{
/* Check that the node is valid for function trace. */
if ((move != MOVE_DOWN) ||
(pred->left != FILTER_PRED_INVALID)) {
*err = ftrace_function_check_pred(pred, 0);
} else {
*err = ftrace_function_check_pred(pred, 1);
if (*err)
return WALK_PRED_ABORT;
*err = __ftrace_function_set_filter(pred->op == OP_EQ,
pred->regex.pattern,
pred->regex.len,
data);
}
return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 109 | 100.00% | 1 | 100.00% |
Total | 109 | 100.00% | 1 | 100.00% |
static int ftrace_function_set_filter(struct perf_event *event,
struct event_filter *filter)
{
struct function_filter_data data = {
.first_filter = 1,
.first_notrace = 1,
.ops = &event->ftrace_ops,
};
return walk_pred_tree(filter->preds, filter->root,
ftrace_function_set_filter_cb, &data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
#else
static int ftrace_function_set_filter(struct perf_event *event,
struct event_filter *filter)
{
return -ENODEV;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_FUNCTION_TRACER */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str)
{
int err;
struct event_filter *filter;
struct trace_event_call *call;
mutex_lock(&event_mutex);
call = event->tp_event;
err = -EINVAL;
if (!call)
goto out_unlock;
err = -EEXIST;
if (event->filter)
goto out_unlock;
err = create_filter(call, filter_str, false, &filter);
if (err)
goto free_filter;
if (ftrace_event_is_function(call))
err = ftrace_function_set_filter(event, filter);
else
event->filter = filter;
free_filter:
if (err || ftrace_event_is_function(call))
__free_filter(filter);
out_unlock:
mutex_unlock(&event_mutex);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 122 | 87.14% | 1 | 20.00% |
Li Zefan | 14 | 10.00% | 1 | 20.00% |
Ingo Molnar | 2 | 1.43% | 1 | 20.00% |
Steven Rostedt | 2 | 1.43% | 2 | 40.00% |
Total | 140 | 100.00% | 5 | 100.00% |
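ftrace_profile_set_filter() is reached from user space through the PERF_EVENT_IOC_SET_FILTER ioctl on a perf event file descriptor. A minimal sketch of that path (error handling trimmed; the filter expressions are examples only):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* 'fd' comes from perf_event_open() on a trace event. */
static int example_set_perf_filter(int fd)
{
        /* ordinary trace events: filter on any field of the event */
        if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0") < 0)
                return -1;

        /*
         * For an ftrace:function event the expression must use the 'ip'
         * field, e.g. "ip == do_sys_open" (see the checks above).
         */
        return 0;
}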
#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_FTRACE_STARTUP_TEST
#include <linux/types.h>
#include <linux/tracepoint.h>
#define CREATE_TRACE_POINTS
#include "trace_events_filter_test.h"
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
.filter = FILTER, \
.rec = { .a = va, .b = vb, .c = vc, .d = vd, \
.e = ve, .f = vf, .g = vg, .h = vh }, \
.match = m, \
.not_visited = nvisit, \
}
#define YES 1
#define NO 0
static struct test_filter_data_t {
char *filter;
struct trace_event_raw_ftrace_test_filter rec;
int match;
char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
"e == 1 && f == 1 && g == 1 && h == 1"
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
"e == 1 || f == 1 || g == 1 || h == 1"
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
"(e == 1 || f == 1) && (g == 1 || h == 1)"
DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
"(e == 1 && f == 1) || (g == 1 && h == 1)"
DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
"(e == 1 && f == 1) || (g == 1 && h == 1)"
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
"(e == 1 || f == 1)) && (g == 1 || h == 1)"
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
"(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
"(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};
#undef DATA_REC
#undef FILTER
#undef YES
#undef NO
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
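For reference, the first DATA_REC() entry above expands into roughly the following initializer (sketched by hand from the macro; the rec layout comes from trace_events_filter_test.h):

{
        .filter = "a == 1 && b == 1 && c == 1 && d == 1 && "
                  "e == 1 && f == 1 && g == 1 && h == 1",
        .rec = { .a = 1, .b = 1, .c = 1, .d = 1,
                 .e = 1, .f = 1, .g = 1, .h = 1 },
        .match = 1,             /* YES */
        .not_visited = "",      /* every predicate is expected to be visited */
},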
static int test_pred_visited;
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
struct ftrace_event_field *field = pred->field;
test_pred_visited = 1;
printk(KERN_INFO "\npred visited %s\n", field->name);
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
int *err, void *data)
{
char *fields = data;
if ((move == MOVE_DOWN) &&
(pred->left == FILTER_PRED_INVALID)) {
struct ftrace_event_field *field = pred->field;
if (!field) {
WARN(1, "all leafs should have field defined");
return WALK_PRED_DEFAULT;
}
if (!strchr(fields, *field->name))
return WALK_PRED_DEFAULT;
WARN_ON(!pred->fn);
pred->fn = test_pred_visited_fn;
}
return WALK_PRED_DEFAULT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 106 | 100.00% | 1 | 100.00% |
Total | 106 | 100.00% | 1 | 100.00% |
static __init int ftrace_test_event_filter(void)
{
int i;
printk(KERN_INFO "Testing ftrace filter: ");
for (i = 0; i < DATA_CNT; i++) {
struct event_filter *filter = NULL;
struct test_filter_data_t *d = &test_filter_data[i];
int err;
err = create_filter(&event_ftrace_test_filter, d->filter,
false, &filter);
if (err) {
printk(KERN_INFO
"Failed to get filter for '%s', err %d\n",
d->filter, err);
__free_filter(filter);
break;
}
/*
* Disabling preemption is not really needed for the self
* tests, but the RCU dereference will complain without it.
*/
preempt_disable();
if (*d->not_visited)
walk_pred_tree(filter->preds, filter->root,
test_walk_pred_cb,
d->not_visited);
test_pred_visited = 0;
err = filter_match_preds(filter, &d->rec);
preempt_enable();
__free_filter(filter);
if (test_pred_visited) {
printk(KERN_INFO
"Failed, unwanted pred visited for filter %s\n",
d->filter);
break;
}
if (err != d->match) {
printk(KERN_INFO
"Failed to match filter '%s', expected %d\n",
d->filter, d->match);
break;
}
}
if (i == DATA_CNT)
printk(KERN_CONT "OK\n");
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 187 | 91.67% | 1 | 33.33% |
Tejun Heo | 10 | 4.90% | 1 | 33.33% |
Steven Rostedt | 7 | 3.43% | 1 | 33.33% |
Total | 204 | 100.00% | 3 | 100.00% |
late_initcall(ftrace_test_event_filter);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Zanussi | 3590 | 35.19% | 11 | 10.58% |
Jiri Olsa | 2623 | 25.71% | 13 | 12.50% |
Steven Rostedt | 2091 | 20.50% | 34 | 32.69% |
Frédéric Weisbecker | 572 | 5.61% | 4 | 3.85% |
Li Zefan | 554 | 5.43% | 25 | 24.04% |
Tejun Heo | 425 | 4.17% | 3 | 2.88% |
Daniel Wagner | 189 | 1.85% | 1 | 0.96% |
Masami Hiramatsu | 84 | 0.82% | 1 | 0.96% |
Oleg Nesterov | 32 | 0.31% | 2 | 1.92% |
Ingo Molnar | 15 | 0.15% | 2 | 1.92% |
Thomas Meyer | 11 | 0.11% | 1 | 0.96% |
Yaowei Bai | 4 | 0.04% | 1 | 0.96% |
Rasmus Villemoes | 3 | 0.03% | 1 | 0.96% |
Jovi Zhangwei | 3 | 0.03% | 2 | 1.92% |
Xiao Guangrong | 3 | 0.03% | 1 | 0.96% |
Daniel Walter | 2 | 0.02% | 1 | 0.96% |
Michal Hocko | 1 | 0.01% | 1 | 0.96% |
Total | 10202 | 100.00% | 104 | 100.00% |