Contributors: 15
Author | Tokens | Token Proportion | Commits | Commit Proportion
Steven Rostedt | 183 | 50.14% | 8 | 28.57%
Joel A Fernandes | 98 | 26.85% | 3 | 10.71%
Arnaldo Carvalho de Melo | 18 | 4.93% | 1 | 3.57%
Ingo Molnar | 16 | 4.38% | 3 | 10.71%
Thomas Gleixner | 16 | 4.38% | 1 | 3.57%
Masami Hiramatsu | 9 | 2.47% | 1 | 3.57%
Peter Zijlstra | 5 | 1.37% | 2 | 7.14%
Linus Torvalds | 4 | 1.10% | 1 | 3.57%
Andrew Morton | 4 | 1.10% | 1 | 3.57%
Jan Beulich | 3 | 0.82% | 1 | 3.57%
Linus Torvalds (pre-git) | 3 | 0.82% | 2 | 7.14%
Jan Engelhardt | 2 | 0.55% | 1 | 3.57%
Suresh B. Siddha | 2 | 0.55% | 1 | 3.57%
John Hawkes | 1 | 0.27% | 1 | 3.57%
Nick Desaulniers | 1 | 0.27% | 1 | 3.57%
Total | 365 | | 28 |
// SPDX-License-Identifier: GPL-2.0
/*
* preemptoff and irqoff tracepoints
*
* Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
*/
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, RCU may not be watching in idle. In that
 * case, wake up RCU to watch while calling the tracepoint. These
 * aren't NMI-safe - so exclude NMI contexts:
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point, args)	trace_##point(args)
#else
#define trace(point, args)					\
	do {							\
		if (trace_##point##_enabled()) {		\
			bool exit_rcu = false;			\
			if (in_nmi())				\
				break;				\
			if (!IS_ENABLED(CONFIG_TINY_RCU) &&	\
			    is_idle_task(current)) {		\
				ct_irq_enter();			\
				exit_rcu = true;		\
			}					\
			trace_##point(args);			\
			if (exit_rcu)				\
				ct_irq_exit();			\
		}						\
	} while (0)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable, TP_ARGS(a0, a1));
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable, TP_ARGS(a0, a1));
	tracer_preempt_off(a0, a1);
}
#endif
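
For context (not part of the file above): the irq_enable/irq_disable and preempt_enable/preempt_disable tracepoints instantiated here via CREATE_TRACE_POINTS can be consumed by other kernel code through the register_trace_<name>() helpers that the tracepoint macros generate. The sketch below is a hypothetical built-in consumer, assuming a kernel built with CONFIG_TRACE_IRQFLAGS=y; the probe name and message are illustrative, and an out-of-tree module would additionally need the tracepoint to be exported.

/*
 * Hypothetical built-in consumer of the irq_disable tracepoint
 * (illustrative sketch, not part of trace_preemptirq.c).
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <trace/events/preemptirq.h>

/*
 * Tracepoint probes take a leading void *data argument followed by the
 * TP_PROTO arguments, here (unsigned long ip, unsigned long parent_ip).
 */
static void probe_irq_disable(void *data, unsigned long ip,
			      unsigned long parent_ip)
{
	/* Keep this cheap: it runs on every traced interrupt-disable. */
	trace_printk("irqs off at %pS, called from %pS\n",
		     (void *)ip, (void *)parent_ip);
}

static int __init irqoff_probe_init(void)
{
	/* register_trace_irq_disable() is generated by the tracepoint macros. */
	return register_trace_irq_disable(probe_irq_disable, NULL);
}
late_initcall(irqoff_probe_init);

The same events are also reachable from userspace without any extra code, by enabling them under the preemptirq group in tracefs once the kernel exposes them.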