cregit-Linux how code gets into the kernel

Release 4.11 drivers/oprofile/cpu_buffer.c

Directory: drivers/oprofile
/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include <asm/ptrace.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"


#define OP_BUFFER_FLAGS	0


static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);


#define DEFAULT_TIMER_EXPIRE (HZ / 10)

static int work_enabled;


/* Report the configured per-cpu buffer size (in samples). */
unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

Contributors

PersonTokensPropCommitsCommitProp
Carl E. Love1090.91%150.00%
Robert Richter19.09%150.00%
Total11100.00%2100.00%


void oprofile_cpu_buffer_inc_smpl_lost(void) { struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); cpu_buf->sample_lost_overflow++; }

Contributors

PersonTokensPropCommitsCommitProp
Carl E. Love2086.96%133.33%
Christoph Lameter28.70%133.33%
Tejun Heo14.35%133.33%
Total23100.00%3100.00%


/* Release the shared ring buffer; safe to call when none was allocated. */
void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter1785.00%150.00%
Andi Kleen315.00%150.00%
Total20100.00%2100.00%

#define RB_EVENT_HDR_SIZE 4
int alloc_cpu_buffers(void) { int i; unsigned long buffer_size = oprofile_cpu_buffer_size; unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + RB_EVENT_HDR_SIZE); op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); if (!op_ring_buffer) goto fail; for_each_possible_cpu(i) { struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); b->last_task = NULL; b->last_is_kernel = -1; b->tracing = 0; b->buffer_size = buffer_size; b->sample_received = 0; b->sample_lost_overflow = 0; b->backtrace_aborted = 0; b->sample_invalid_eip = 0; b->cpu = i; INIT_DELAYED_WORK(&b->work, wq_sync_buffer); } return 0; fail: free_cpu_buffers(); return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
John Levon8055.17%423.53%
Robert Richter3222.07%317.65%
Philippe Elie128.28%15.88%
Greg Banks64.14%15.88%
Mike Travis42.76%15.88%
Anton Blanchard32.07%15.88%
Andi Kleen21.38%15.88%
Pekka J Enberg21.38%15.88%
Chris J Arges10.69%15.88%
Tejun Heo10.69%15.88%
David Howells10.69%15.88%
Mika Kukkonen10.69%15.88%
Total145100.00%17100.00%


/* Enable and kick off the periodic per-cpu sync work. */
void start_cpu_work(void)
{
	int cpu;

	work_enabled = 1;

	for_each_online_cpu(cpu) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, cpu);

		/*
		 * Stagger the timers by one jiffy per cpu so they do
		 * not all expire at the same time.
		 */
		schedule_delayed_work_on(cpu, &b->work, DEFAULT_TIMER_EXPIRE + cpu);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
John Levon3266.67%233.33%
Anton Blanchard1122.92%233.33%
Mike Travis48.33%116.67%
Tejun Heo12.08%116.67%
Total48100.00%6100.00%


/* Stop the sync work from re-arming itself (see wq_sync_buffer()). */
void end_cpu_work(void)
{
	work_enabled = 0;
}

Contributors

PersonTokensPropCommitsCommitProp
John Levon1090.91%266.67%
Tejun Heo19.09%133.33%
Total11100.00%3100.00%


/* Wait for any pending per-cpu sync work to finish. */
void flush_cpu_work(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, cpu);

		/* these works are per-cpu, no need for flush_sync */
		flush_delayed_work(&b->work);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
John Levon1642.11%114.29%
Tejun Heo1231.58%228.57%
Anton Blanchard513.16%228.57%
Mike Travis410.53%114.29%
Greg Banks12.63%114.29%
Total38100.00%7100.00%

/*
 * Prepare the cpu buffer to write a sample.
 *
 * struct op_entry tracks the in-flight ring buffer operation while
 * struct op_sample is the payload that actually gets stored.  @entry
 * may be passed in uninitialized.  @size data words are reserved in
 * addition to the sample header.  Callers must follow up with
 * op_cpu_buffer_write_commit().  Returns the sample on success or
 * NULL if no space could be reserved.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	unsigned long bytes = sizeof(struct op_sample) +
		size * sizeof(entry->sample->data[0]);

	entry->event = ring_buffer_lock_reserve(op_ring_buffer, bytes);
	if (!entry->event)
		return NULL;

	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter8294.25%266.67%
Andi Kleen55.75%133.33%
Total87100.00%3100.00%


/* Publish a sample previously set up with op_cpu_buffer_write_reserve(). */
int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter1995.00%150.00%
Andi Kleen15.00%150.00%
Total20100.00%2100.00%


/*
 * Consume the next sample from @cpu's portion of the ring buffer.
 * Fills in @entry and returns the sample, or NULL if the buffer is
 * empty.
 */
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *ev;

	ev = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
	if (!ev)
		return NULL;

	entry->event = ev;
	entry->sample = ring_buffer_event_data(ev);
	/* number of data words that follow the op_sample header */
	entry->size = (ring_buffer_event_length(ev) -
		       sizeof(struct op_sample)) /
		sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;

	return entry->sample;
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter9696.00%250.00%
Andi Kleen22.00%125.00%
Steven Rostedt22.00%125.00%
Total100100.00%4100.00%


/* Number of unconsumed entries currently queued for @cpu. */
unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter1694.12%150.00%
Andi Kleen15.88%150.00%
Total17100.00%2100.00%


static int op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, int is_kernel, struct task_struct *task) { struct op_entry entry; struct op_sample *sample; unsigned long flags; int size; flags = 0; if (backtrace) flags |= TRACE_BEGIN; /* notice a switch from user->kernel or vice versa */ is_kernel = !!is_kernel; if (cpu_buf->last_is_kernel != is_kernel) { cpu_buf->last_is_kernel = is_kernel; flags |= KERNEL_CTX_SWITCH; if (is_kernel) flags |= IS_KERNEL; } /* notice a task switch */ if (cpu_buf->last_task != task) { cpu_buf->last_task = task; flags |= USER_CTX_SWITCH; } if (!flags) /* nothing to do */ return 0; if (flags & USER_CTX_SWITCH) size = 1; else size = 0; sample = op_cpu_buffer_write_reserve(&entry, size); if (!sample) return -ENOMEM; sample->eip = ESCAPE_CODE; sample->event = flags; if (size) op_cpu_buffer_add_data(&entry, (unsigned long)task); op_cpu_buffer_write_commit(&entry); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter16286.63%770.00%
Greg Banks189.63%110.00%
John Levon73.74%220.00%
Total187100.00%10100.00%


/*
 * Store a single pc/event pair with no extra data words.
 * Returns -ENOMEM if no buffer space is available.
 */
static inline int op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
				unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample = op_cpu_buffer_write_reserve(&entry, 0);

	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter5277.61%250.00%
Greg Banks1420.90%125.00%
Jesper Juhl11.49%125.00%
Total67100.00%4100.00%

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc.  We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      unsigned long backtrace, int is_kernel,
		      unsigned long event, struct task_struct *task)
{
	struct task_struct *tsk = task ? task : current;

	cpu_buf->sample_received++;

	/* ESCAPE_CODE is reserved for control records; reject it as a pc */
	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Banks3230.19%111.11%
Robert Richter2826.42%333.33%
Heinz Graalfs1716.04%111.11%
John Levon1514.15%333.33%
Philippe Elie1413.21%111.11%
Total106100.00%9100.00%


/* Mark the buffer as collecting backtrace entries. */
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Banks1266.67%133.33%
John Levon422.22%133.33%
Robert Richter211.11%133.33%
Total18100.00%3100.00%


/* Stop collecting backtrace entries. */
static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Banks1477.78%133.33%
John Levon316.67%133.33%
Robert Richter15.56%133.33%
Total18100.00%3100.00%


/*
 * Log one sample on the current cpu and, if a backtrace depth is
 * configured, follow it with a stack trace.
 */
static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel,
			  struct task_struct *task)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Banks4548.91%111.11%
Robert Richter2122.83%222.22%
Brian Rogan77.61%111.11%
John Levon77.61%111.11%
Heinz Graalfs77.61%111.11%
Christoph Lameter22.17%111.11%
Mike Travis22.17%111.11%
Tejun Heo11.09%111.11%
Total92100.00%9100.00%


/* Record a hardware sample attributed to an explicit task. */
void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
				unsigned long event, int is_kernel,
				struct task_struct *task)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
}

Contributors

PersonTokensPropCommitsCommitProp
Heinz Graalfs40100.00%1100.00%
Total40100.00%1100.00%


/* Record a sample attributed to the current task. */
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter3394.29%150.00%
Heinz Graalfs25.71%150.00%
Total35100.00%2100.00%


/*
 * Record a sample derived from the interrupted register state.
 * A NULL @regs is tolerated and counted as an invalid sample.
 */
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		/* pc == ESCAPE_CODE forces an early return in log_sample() */
		is_kernel = 0;
		pc = ESCAPE_CODE;
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
Brian Rogan3751.39%125.00%
Phil Carmody3244.44%125.00%
Heinz Graalfs22.78%125.00%
Robert Richter11.39%125.00%
Total72100.00%4100.00%

/* * Add samples with data to the ring buffer. * * Use oprofile_add_data(&entry, val) to add data and * oprofile_write_commit(&entry) to commit the sample. */
void oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, unsigned long pc, int code, int size) { struct op_sample *sample; int is_kernel = !user_mode(regs); struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); cpu_buf->sample_received++; /* no backtraces for samples with data */ if (op_add_code(cpu_buf, 0, is_kernel, current)) goto fail; sample = op_cpu_buffer_write_reserve(entry, size + 2); if (!sample) goto fail; sample->eip = ESCAPE_CODE; sample->event = 0; /* no flags */ op_cpu_buffer_add_data(entry, code); op_cpu_buffer_add_data(entry, pc); return; fail: entry->event = NULL; cpu_buf->sample_lost_overflow++; }

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter9369.92%666.67%
Barry Kasindorf3727.82%111.11%
Christoph Lameter21.50%111.11%
Tejun Heo10.75%111.11%
Total133100.00%9100.00%


/* Append one data word to a sample reserved by oprofile_write_reserve(). */
int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;

	return op_cpu_buffer_add_data(entry, val);
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter32100.00%2100.00%
Total32100.00%2100.00%


/* Append a 64-bit value as two 32-bit data words, low word first. */
int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;

	/*
	 * Report failure (0) when fewer than two slots remain, even
	 * if one slot is still free.
	 */
	if (op_cpu_buffer_get_size(entry) < 2)
		return 0;

	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;

	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter67100.00%1100.00%
Total67100.00%1100.00%


/* Commit a sample built with oprofile_write_reserve()/oprofile_add_data(). */
int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;

	return op_cpu_buffer_write_commit(entry);
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter27100.00%2100.00%
Total27100.00%2100.00%


/* Record a plain pc sample on the current cpu, with no backtrace. */
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Banks2764.29%114.29%
John Levon614.29%114.29%
Mike Travis24.76%114.29%
Heinz Graalfs24.76%114.29%
Christoph Lameter24.76%114.29%
Robert Richter24.76%114.29%
Tejun Heo12.38%114.29%
Total42100.00%7100.00%


/*
 * Append one backtrace entry while tracing is active; abort the
 * trace on a broken frame or a full buffer.
 */
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * A broken frame can yield an eip with the same value as the
	 * escape code; treat that like a failed write.
	 */
	if (pc == ESCAPE_CODE || op_add_sample(cpu_buf, pc, 0)) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Greg Banks3653.73%112.50%
Robert Richter2131.34%337.50%
John Levon57.46%112.50%
Mike Travis22.99%112.50%
Christoph Lameter22.99%112.50%
Tejun Heo11.49%112.50%
Total67100.00%8100.00%

/* * This serves to avoid cpu buffer overflow, and makes sure * the task mortuary progresses * * By using schedule_delayed_work_on and then schedule_delayed_work * we guarantee this will stay on the correct cpu */
static void wq_sync_buffer(struct work_struct *work) { struct oprofile_cpu_buffer *b = container_of(work, struct oprofile_cpu_buffer, work.work); if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) { cancel_delayed_work(&b->work); return; } sync_buffer(b->cpu); /* don't re-add the work if we're shutting down */ if (work_enabled) schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE); }

Contributors

PersonTokensPropCommitsCommitProp
John Levon4051.28%233.33%
Chris J Arges1924.36%116.67%
David Howells1417.95%116.67%
Anton Blanchard45.13%116.67%
Robert Richter11.28%116.67%
Total78100.00%6100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Robert Richter82349.28%2242.31%
John Levon25115.03%611.54%
Greg Banks21112.63%11.92%
Heinz Graalfs704.19%11.92%
Brian Rogan442.63%11.92%
Barry Kasindorf372.22%11.92%
Phil Carmody321.92%11.92%
Carl E. Love301.80%11.92%
Philippe Elie261.56%11.92%
Anton Blanchard251.50%23.85%
Mike Travis231.38%11.92%
Tejun Heo211.26%23.85%
Chris J Arges201.20%11.92%
David Howells181.08%11.92%
Andi Kleen150.90%11.92%
Christoph Lameter100.60%11.92%
Andrew Morton30.18%11.92%
Ingo Molnar30.18%11.92%
Pekka J Enberg20.12%11.92%
Steven Rostedt20.12%11.92%
Eric Dumazet10.06%11.92%
Adrian Bunk10.06%11.92%
Jesper Juhl10.06%11.92%
Mika Kukkonen10.06%11.92%
Total1670100.00%52100.00%
Directory: drivers/oprofile
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.