Release 4.7 drivers/oprofile/cpu_buffer.c
/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| carl love | 10 | 90.91% | 1 | 50.00% |
| robert richter | 1 | 9.09% | 1 | 50.00% |
| Total | 11 | 100.00% | 2 | 100.00% |
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| carl love | 20 | 86.96% | 1 | 33.33% |
| christoph lameter | 2 | 8.70% | 1 | 33.33% |
| tejun heo | 1 | 4.35% | 1 | 33.33% |
| Total | 23 | 100.00% | 3 | 100.00% |
void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 17 | 85.00% | 1 | 50.00% |
| andi kleen | 3 | 15.00% | 1 | 50.00% |
| Total | 20 | 100.00% | 2 | 100.00% |
#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| john levon | 80 | 55.17% | 4 | 23.53% |
| robert richter | 32 | 22.07% | 3 | 17.65% |
| philippe elie | 12 | 8.28% | 1 | 5.88% |
| greg banks | 6 | 4.14% | 1 | 5.88% |
| mike travis | 4 | 2.76% | 1 | 5.88% |
| anton blanchard | 3 | 2.07% | 1 | 5.88% |
| pekka j enberg | 2 | 1.38% | 1 | 5.88% |
| andi kleen | 2 | 1.38% | 1 | 5.88% |
| david howells | 1 | 0.69% | 1 | 5.88% |
| chris j arges | 1 | 0.69% | 1 | 5.88% |
| tejun heo | 1 | 0.69% | 1 | 5.88% |
| mika kukkonen | 1 | 0.69% | 1 | 5.88% |
| Total | 145 | 100.00% | 17 | 100.00% |
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| john levon | 32 | 66.67% | 2 | 33.33% |
| anton blanchard | 11 | 22.92% | 2 | 33.33% |
| mike travis | 4 | 8.33% | 1 | 16.67% |
| tejun heo | 1 | 2.08% | 1 | 16.67% |
| Total | 48 | 100.00% | 6 | 100.00% |
void end_cpu_work(void)
{
	work_enabled = 0;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| john levon | 10 | 90.91% | 2 | 66.67% |
| tejun heo | 1 | 9.09% | 1 | 33.33% |
| Total | 11 | 100.00% | 3 | 100.00% |
void flush_cpu_work(void)
{
	int i;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/* these works are per-cpu, no need for flush_sync */
		flush_delayed_work(&b->work);
	}
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| john levon | 16 | 42.11% | 1 | 14.29% |
| tejun heo | 12 | 31.58% | 2 | 28.57% |
| anton blanchard | 5 | 13.16% | 2 | 28.57% |
| mike travis | 4 | 10.53% | 1 | 14.29% |
| greg banks | 1 | 2.63% | 1 | 14.29% |
| Total | 38 | 100.00% | 7 | 100.00% |
/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct entry can be uninitialized. The function reserves a
 * data array that is specified by size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. On error
 * a NULL pointer is returned, otherwise a pointer to the sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (!entry->event)
		return NULL;
	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 82 | 94.25% | 2 | 66.67% |
| andi kleen | 5 | 5.75% | 1 | 33.33% |
| Total | 87 | 100.00% | 3 | 100.00% |
int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 19 | 95.00% | 1 | 50.00% |
| andi kleen | 1 | 5.00% | 1 | 50.00% |
| Total | 20 | 100.00% | 2 | 100.00% |
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;

	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
	if (!e)
		return NULL;

	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 96 | 96.00% | 2 | 50.00% |
| steven rostedt | 2 | 2.00% | 1 | 25.00% |
| andi kleen | 2 | 2.00% | 1 | 25.00% |
| Total | 100 | 100.00% | 4 | 100.00% |
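For context, a minimal sketch of the consumer side, modeled on what sync_buffer() in buffer_sync.c does with these helpers; the two processing helpers are hypothetical stand-ins, not functions from this file:

static void drain_cpu_sketch(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long avail = op_cpu_buffer_entries(cpu);
	unsigned long i;

	for (i = 0; i < avail; i++) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;
		/*
		 * ESCAPE_CODE marks a control record (context switch,
		 * trace begin, extra data); anything else is a plain
		 * pc/event pair bound for the global event buffer.
		 */
		if (sample->eip == ESCAPE_CODE)
			handle_escape_record(&entry);	/* hypothetical */
		else
			emit_to_event_buffer(sample);	/* hypothetical */
	}
}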
unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 16 | 94.12% | 1 | 50.00% |
| andi kleen | 1 | 5.88% | 1 | 50.00% |
| Total | 17 | 100.00% | 2 | 100.00% |
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 162 | 86.63% | 7 | 70.00% |
| greg banks | 18 | 9.63% | 1 | 10.00% |
| john levon | 7 | 3.74% | 2 | 20.00% |
| Total | 187 | 100.00% | 10 | 100.00% |
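The record op_add_code() emits is compact. A sketch of its on-buffer layout, derived from the code above (field names are from struct op_sample):

/*
 * Escape record produced by op_add_code():
 *
 *	sample->eip   = ESCAPE_CODE
 *	sample->event = any subset of TRACE_BEGIN, KERNEL_CTX_SWITCH,
 *	                IS_KERNEL, USER_CTX_SWITCH
 *	data[0]       = (unsigned long)task, present only when
 *	                USER_CTX_SWITCH is set (i.e. size == 1)
 */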
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 52 | 77.61% | 2 | 50.00% |
| greg banks | 14 | 20.90% | 1 | 25.00% |
| jesper juhl | 1 | 1.49% | 1 | 25.00% |
| Total | 67 | 100.00% | 4 | 100.00% |
/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event,
	   struct task_struct *task)
{
	struct task_struct *tsk = task ? task : current;
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| greg banks | 32 | 30.19% | 1 | 11.11% |
| robert richter | 28 | 26.42% | 3 | 33.33% |
| heinz graalfs | 17 | 16.04% | 1 | 11.11% |
| john levon | 15 | 14.15% | 3 | 33.33% |
| philippe elie | 14 | 13.21% | 1 | 11.11% |
| Total | 106 | 100.00% | 9 | 100.00% |
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| greg banks | 12 | 66.67% | 1 | 33.33% |
| john levon | 4 | 22.22% | 1 | 33.33% |
| robert richter | 2 | 11.11% | 1 | 33.33% |
| Total | 18 | 100.00% | 3 | 100.00% |
static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| greg banks | 14 | 77.78% | 1 | 33.33% |
| john levon | 3 | 16.67% | 1 | 33.33% |
| robert richter | 1 | 5.56% | 1 | 33.33% |
| Total | 18 | 100.00% | 3 | 100.00% |
static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel,
			  struct task_struct *task)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| greg banks | 45 | 48.91% | 1 | 11.11% |
| robert richter | 21 | 22.83% | 2 | 22.22% |
| brian rogan | 7 | 7.61% | 1 | 11.11% |
| john levon | 7 | 7.61% | 1 | 11.11% |
| heinz graalfs | 7 | 7.61% | 1 | 11.11% |
| mike travis | 2 | 2.17% | 1 | 11.11% |
| christoph lameter | 2 | 2.17% | 1 | 11.11% |
| tejun heo | 1 | 1.09% | 1 | 11.11% |
| Total | 92 | 100.00% | 9 | 100.00% |
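The backtrace callback invoked between begin/end trace is expected to report one pc per frame via oprofile_add_trace(). A minimal sketch of such a callback, with the actual frame walking left abstract (walk_one_frame() is a hypothetical helper, not an oprofile API):

static void sketch_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long pc;

	/*
	 * Walk at most 'depth' frames; while cpu_buf->tracing is set,
	 * each reported pc is stored as a plain sample in the per-CPU
	 * buffer.
	 */
	while (depth-- && walk_one_frame(regs, &pc))	/* hypothetical */
		oprofile_add_trace(pc);
}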
void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
				unsigned long event, int is_kernel,
				struct task_struct *task)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| heinz graalfs | 40 | 100.00% | 1 | 100.00% |
| Total | 40 | 100.00% | 1 | 100.00% |
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 33 | 94.29% | 1 | 50.00% |
| heinz graalfs | 2 | 5.71% | 1 | 50.00% |
| Total | 35 | 100.00% | 2 | 100.00% |
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;    /* This value will not be used */
		pc = ESCAPE_CODE; /* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| brian rogan | 37 | 51.39% | 1 | 25.00% |
| phil carmody | 32 | 44.44% | 1 | 25.00% |
| heinz graalfs | 2 | 2.78% | 1 | 25.00% |
| robert richter | 1 | 1.39% | 1 | 25.00% |
| Total | 72 | 100.00% | 4 | 100.00% |
/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 93 | 69.92% | 6 | 66.67% |
| barry kasindorf | 37 | 27.82% | 1 | 11.11% |
| christoph lameter | 2 | 1.50% | 1 | 11.11% |
| tejun heo | 1 | 0.75% | 1 | 11.11% |
| Total | 133 | 100.00% | 9 | 100.00% |
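A hedged usage sketch from the producer side, loosely modeled on how hardware drivers (for example the AMD IBS code) feed extended records through this API; MY_SAMPLE_CODE and the two raw payload words are invented for illustration:

static void sketch_log_hw_sample(struct pt_regs * const regs,
				 unsigned long rip, unsigned long raw0,
				 unsigned long raw1)
{
	struct op_entry entry;

	/* reserve room for two data words after the code/pc header */
	oprofile_write_reserve(&entry, regs, rip, MY_SAMPLE_CODE, 2);
	oprofile_add_data(&entry, raw0);
	oprofile_add_data(&entry, raw1);
	/* harmless if the reserve failed: entry->event is NULL then */
	oprofile_write_commit(&entry);
}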
int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 32 | 100.00% | 2 | 100.00% |
| Total | 32 | 100.00% | 2 | 100.00% |
int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * the function returns 0 to indicate a too small
		 * buffer, even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 67 | 100.00% | 1 | 100.00% |
| Total | 67 | 100.00% | 1 | 100.00% |
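Since oprofile_add_data64() stores the low 32 bits first and then the high 32 bits, a consumer reassembles the value in the same order. A one-line sketch (function name invented):

static u64 sketch_read_data64(unsigned long lo, unsigned long hi)
{
	/* inverse of oprofile_add_data64(): low u32 first, then high */
	return (u64)(u32)lo | ((u64)(u32)hi << 32);
}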
int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 27 | 100.00% | 2 | 100.00% |
| Total | 27 | 100.00% | 2 | 100.00% |
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| greg banks | 27 | 64.29% | 1 | 14.29% |
| john levon | 6 | 14.29% | 1 | 14.29% |
| christoph lameter | 2 | 4.76% | 1 | 14.29% |
| heinz graalfs | 2 | 4.76% | 1 | 14.29% |
| robert richter | 2 | 4.76% | 1 | 14.29% |
| mike travis | 2 | 4.76% | 1 | 14.29% |
| tejun heo | 1 | 2.38% | 1 | 14.29% |
| Total | 42 | 100.00% | 7 | 100.00% |
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| greg banks | 36 | 53.73% | 1 | 12.50% |
| robert richter | 21 | 31.34% | 3 | 37.50% |
| john levon | 5 | 7.46% | 1 | 12.50% |
| christoph lameter | 2 | 2.99% | 1 | 12.50% |
| mike travis | 2 | 2.99% | 1 | 12.50% |
| tejun heo | 1 | 1.49% | 1 | 12.50% |
| Total | 67 | 100.00% | 8 | 100.00% |
/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
		cancel_delayed_work(&b->work);
		return;
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| john levon | 40 | 51.28% | 2 | 33.33% |
| chris j arges | 19 | 24.36% | 1 | 16.67% |
| david howells | 14 | 17.95% | 1 | 16.67% |
| anton blanchard | 4 | 5.13% | 1 | 16.67% |
| robert richter | 1 | 1.28% | 1 | 16.67% |
| Total | 78 | 100.00% | 6 | 100.00% |
Overall Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| robert richter | 823 | 49.37% | 22 | 43.14% |
| john levon | 251 | 15.06% | 6 | 11.76% |
| greg banks | 211 | 12.66% | 1 | 1.96% |
| heinz graalfs | 70 | 4.20% | 1 | 1.96% |
| brian rogan | 44 | 2.64% | 1 | 1.96% |
| barry kasindorf | 37 | 2.22% | 1 | 1.96% |
| phil carmody | 32 | 1.92% | 1 | 1.96% |
| carl love | 30 | 1.80% | 1 | 1.96% |
| philippe elie | 26 | 1.56% | 1 | 1.96% |
| anton blanchard | 25 | 1.50% | 2 | 3.92% |
| mike travis | 23 | 1.38% | 1 | 1.96% |
| tejun heo | 21 | 1.26% | 2 | 3.92% |
| chris j arges | 20 | 1.20% | 1 | 1.96% |
| david howells | 18 | 1.08% | 1 | 1.96% |
| andi kleen | 15 | 0.90% | 1 | 1.96% |
| christoph lameter | 10 | 0.60% | 1 | 1.96% |
| andrew morton | 3 | 0.18% | 1 | 1.96% |
| pekka j enberg | 2 | 0.12% | 1 | 1.96% |
| steven rostedt | 2 | 0.12% | 1 | 1.96% |
| eric dumazet | 1 | 0.06% | 1 | 1.96% |
| adrian bunk | 1 | 0.06% | 1 | 1.96% |
| jesper juhl | 1 | 0.06% | 1 | 1.96% |
| mika kukkonen | 1 | 0.06% | 1 | 1.96% |
| Total | 1667 | 100.00% | 51 | 100.00% |