Release 4.10 arch/powerpc/platforms/powernv/opal-tracepoints.c
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>
#ifdef HAVE_JUMP_LABEL
/*
 * Patched-branch key tested by the OPAL call wrappers so that all tracing
 * overhead is skipped (a nop is executed) while no tracepoint is attached.
 */
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
/*
 * Tracepoint registration callback: enable the static key so the OPAL
 * wrappers start calling into the trace code. Returns 0 (success).
 */
int opal_tracepoint_regfunc(void)
{
static_key_slow_inc(&opal_tracepoint_key);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| anton blanchard | 12 | 75.00% | 1 | 50.00% |
| steven rostedt | 4 | 25.00% | 1 | 50.00% |
| Total | 16 | 100.00% | 2 | 100.00% |
/*
 * Tracepoint unregistration callback: drop the static key reference;
 * once it reaches zero the OPAL wrappers go back to skipping the tracing.
 */
void opal_tracepoint_unregfunc(void)
{
static_key_slow_dec(&opal_tracepoint_key);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| anton blanchard | 13 | 100.00% | 1 | 100.00% |
| Total | 13 | 100.00% | 1 | 100.00% |
#else
/*
* We optimise OPAL calls by placing opal_tracepoint_refcount
* directly in the TOC so we can check if the opal tracepoints are
* enabled via a single load.
*/
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long opal_tracepoint_refcount;
/*
 * Tracepoint registration callback (no jump labels): bump the TOC-resident
 * refcount that the OPAL asm stubs test with a single load.
 * No locking needed here — serialized by tracepoints_mutex (see NB above).
 * Returns 0 (success).
 */
int opal_tracepoint_regfunc(void)
{
opal_tracepoint_refcount++;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| anton blanchard | 9 | 69.23% | 1 | 50.00% |
| steven rostedt | 4 | 30.77% | 1 | 50.00% |
| Total | 13 | 100.00% | 2 | 100.00% |
/*
 * Tracepoint unregistration callback (no jump labels): drop the refcount;
 * at zero the OPAL asm stubs skip the tracing path again.
 * Serialized by tracepoints_mutex, so the non-atomic decrement is safe.
 */
void opal_tracepoint_unregfunc(void)
{
opal_tracepoint_refcount--;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| anton blanchard | 10 | 100.00% | 1 | 100.00% |
| Total | 10 | 100.00% | 1 | 100.00% |
#endif
/*
* Since the tracing code might execute OPAL calls we need to guard against
* recursion.
*/
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);
/*
 * Fire the opal_entry tracepoint for an OPAL call. The tracing code may
 * itself issue OPAL calls, so a per-cpu depth counter (sampled with IRQs
 * off) suppresses any nested invocation.
 */
void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long irqflags;
	unsigned int *recursion;

	local_irq_save(irqflags);

	recursion = this_cpu_ptr(&opal_trace_depth);
	if (!*recursion) {
		(*recursion)++;
		/* Matching preempt_enable() is in __trace_opal_exit(). */
		preempt_disable();
		trace_opal_entry(opcode, args);
		(*recursion)--;
	}

	local_irq_restore(irqflags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| anton blanchard | 71 | 97.26% | 1 | 50.00% |
| christoph lameter | 2 | 2.74% | 1 | 50.00% |
| Total | 73 | 100.00% | 2 | 100.00% |
/*
 * Fire the opal_exit tracepoint after an OPAL call returns, using the
 * same per-cpu recursion guard as __trace_opal_entry().
 */
void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long irqflags;
	unsigned int *recursion;

	local_irq_save(irqflags);

	recursion = this_cpu_ptr(&opal_trace_depth);
	if (!*recursion) {
		(*recursion)++;
		trace_opal_exit(opcode, retval);
		/* Pairs with the preempt_disable() in __trace_opal_entry(). */
		preempt_enable();
		(*recursion)--;
	}

	local_irq_restore(irqflags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| anton blanchard | 69 | 97.18% | 1 | 50.00% |
| christoph lameter | 2 | 2.82% | 1 | 50.00% |
| Total | 71 | 100.00% | 2 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| anton blanchard | 220 | 93.22% | 1 | 20.00% |
| steven rostedt | 8 | 3.39% | 1 | 20.00% |
| christoph lameter | 4 | 1.69% | 1 | 20.00% |
| daniel axtens | 3 | 1.27% | 1 | 20.00% |
| zhouyi zhou | 1 | 0.42% | 1 | 20.00% |
| Total | 236 | 100.00% | 5 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.