cregit-Linux how code gets into the kernel

Release 4.7 drivers/oprofile/cpu_buffer.c

Directory: drivers/oprofile
/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"


#define OP_BUFFER_FLAGS	0


static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);


#define DEFAULT_TIMER_EXPIRE (HZ / 10)

static int work_enabled;


/* Report the configured per-CPU sample buffer size. */
unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

Contributors

PersonTokensPropCommitsCommitProp
carl lovecarl love1090.91%150.00%
robert richterrobert richter19.09%150.00%
Total11100.00%2100.00%


void oprofile_cpu_buffer_inc_smpl_lost(void) { struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); cpu_buf->sample_lost_overflow++; }

Contributors

PersonTokensPropCommitsCommitProp
carl lovecarl love2086.96%133.33%
christoph lameterchristoph lameter28.70%133.33%
tejun heotejun heo14.35%133.33%
Total23100.00%3100.00%


/* Release the shared ring buffer, if one was allocated. Safe to call twice. */
void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter1785.00%150.00%
andi kleenandi kleen315.00%150.00%
Total20100.00%2100.00%

#define RB_EVENT_HDR_SIZE 4
int alloc_cpu_buffers(void) { int i; unsigned long buffer_size = oprofile_cpu_buffer_size; unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + RB_EVENT_HDR_SIZE); op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); if (!op_ring_buffer) goto fail; for_each_possible_cpu(i) { struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); b->last_task = NULL; b->last_is_kernel = -1; b->tracing = 0; b->buffer_size = buffer_size; b->sample_received = 0; b->sample_lost_overflow = 0; b->backtrace_aborted = 0; b->sample_invalid_eip = 0; b->cpu = i; INIT_DELAYED_WORK(&b->work, wq_sync_buffer); } return 0; fail: free_cpu_buffers(); return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
john levonjohn levon8055.17%423.53%
robert richterrobert richter3222.07%317.65%
philippe eliephilippe elie128.28%15.88%
greg banksgreg banks64.14%15.88%
mike travismike travis42.76%15.88%
anton blanchardanton blanchard32.07%15.88%
pekka j enbergpekka j enberg21.38%15.88%
andi kleenandi kleen21.38%15.88%
david howellsdavid howells10.69%15.88%
chris j argeschris j arges10.69%15.88%
tejun heotejun heo10.69%15.88%
mika kukkonenmika kukkonen10.69%15.88%
Total145100.00%17100.00%


/* Enable and schedule the periodic per-CPU buffer sync work. */
void start_cpu_work(void)
{
	int cpu;

	work_enabled = 1;

	for_each_online_cpu(cpu) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, cpu);

		/*
		 * Spread the work by 1 jiffy per cpu so they dont all
		 * fire at once.
		 */
		schedule_delayed_work_on(cpu, &b->work,
					 DEFAULT_TIMER_EXPIRE + cpu);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
john levonjohn levon3266.67%233.33%
anton blanchardanton blanchard1122.92%233.33%
mike travismike travis48.33%116.67%
tejun heotejun heo12.08%116.67%
Total48100.00%6100.00%


/* Stop the sync work items from rescheduling themselves. */
void end_cpu_work(void)
{
	work_enabled = 0;
}

Contributors

PersonTokensPropCommitsCommitProp
john levonjohn levon1090.91%266.67%
tejun heotejun heo19.09%133.33%
Total11100.00%3100.00%


/* Wait for any pending per-CPU sync work to finish. */
void flush_cpu_work(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, cpu);

		/* these works are per-cpu, no need for flush_sync */
		flush_delayed_work(&b->work);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
john levonjohn levon1642.11%114.29%
tejun heotejun heo1231.58%228.57%
anton blanchardanton blanchard513.16%228.57%
mike travismike travis410.53%114.29%
greg banksgreg banks12.63%114.29%
Total38100.00%7100.00%

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct entry can be uninitialized. The function reserves a
 * data array that is specified by size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. In case of
 * errors a null pointer is returned, otherwise the pointer to the
 * sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	unsigned long bytes;

	/* Header plus 'size' data slots. */
	bytes = sizeof(struct op_sample) +
		size * sizeof(entry->sample->data[0]);

	entry->event = ring_buffer_lock_reserve(op_ring_buffer, bytes);
	if (!entry->event)
		return NULL;

	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter8294.25%266.67%
andi kleenandi kleen55.75%133.33%
Total87100.00%3100.00%


/* Commit a sample previously reserved with op_cpu_buffer_write_reserve(). */
int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter1995.00%150.00%
andi kleenandi kleen15.00%150.00%
Total20100.00%2100.00%


/*
 * Consume the next sample from @cpu's portion of the ring buffer and
 * fill in @entry. Returns the sample, or NULL if the buffer is empty.
 */
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *event;

	event = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
	if (!event)
		return NULL;

	entry->event = event;
	entry->sample = ring_buffer_event_data(event);
	/* Derive the data-slot count from the event's payload length. */
	entry->size = (ring_buffer_event_length(event) -
		       sizeof(struct op_sample)) /
		      sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;

	return entry->sample;
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter9696.00%250.00%
steven rostedtsteven rostedt22.00%125.00%
andi kleenandi kleen22.00%125.00%
Total100100.00%4100.00%


/* Number of samples currently queued for @cpu. */
unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter1694.12%150.00%
andi kleenandi kleen15.88%150.00%
Total17100.00%2100.00%


static int op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, int is_kernel, struct task_struct *task) { struct op_entry entry; struct op_sample *sample; unsigned long flags; int size; flags = 0; if (backtrace) flags |= TRACE_BEGIN; /* notice a switch from user->kernel or vice versa */ is_kernel = !!is_kernel; if (cpu_buf->last_is_kernel != is_kernel) { cpu_buf->last_is_kernel = is_kernel; flags |= KERNEL_CTX_SWITCH; if (is_kernel) flags |= IS_KERNEL; } /* notice a task switch */ if (cpu_buf->last_task != task) { cpu_buf->last_task = task; flags |= USER_CTX_SWITCH; } if (!flags) /* nothing to do */ return 0; if (flags & USER_CTX_SWITCH) size = 1; else size = 0; sample = op_cpu_buffer_write_reserve(&entry, size); if (!sample) return -ENOMEM; sample->eip = ESCAPE_CODE; sample->event = flags; if (size) op_cpu_buffer_add_data(&entry, (unsigned long)task); op_cpu_buffer_write_commit(&entry); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter16286.63%770.00%
greg banksgreg banks189.63%110.00%
john levonjohn levon73.74%220.00%
Total187100.00%10100.00%


/* Store a plain pc/event pair in the ring buffer; 0 on success, -ENOMEM on overflow. */
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter5277.61%250.00%
greg banksgreg banks1420.90%125.00%
jesper juhljesper juhl11.49%125.00%
Total67100.00%4100.00%

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event,
	   struct task_struct *task)
{
	struct task_struct *tsk = task ? task : current;

	cpu_buf->sample_received++;

	/* An eip equal to the escape code would corrupt the stream. */
	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
greg banksgreg banks3230.19%111.11%
robert richterrobert richter2826.42%333.33%
heinz graalfsheinz graalfs1716.04%111.11%
john levonjohn levon1514.15%333.33%
philippe eliephilippe elie1413.21%111.11%
Total106100.00%9100.00%


/* Mark the buffer as collecting a backtrace. */
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

Contributors

PersonTokensPropCommitsCommitProp
greg banksgreg banks1266.67%133.33%
john levonjohn levon422.22%133.33%
robert richterrobert richter211.11%133.33%
Total18100.00%3100.00%


/* Mark the backtrace as finished. */
static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

Contributors

PersonTokensPropCommitsCommitProp
greg banksgreg banks1477.78%133.33%
john levonjohn levon316.67%133.33%
robert richterrobert richter15.56%133.33%
Total18100.00%3100.00%


/*
 * Log a sample on the current CPU and, if a backtrace depth is
 * configured, follow it with a stack trace via oprofile_ops.backtrace.
 */
static inline void
__oprofile_add_ext_sample(unsigned long pc,
			  struct pt_regs * const regs,
			  unsigned long event, int is_kernel,
			  struct task_struct *task)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fail we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

Contributors

PersonTokensPropCommitsCommitProp
greg banksgreg banks4548.91%111.11%
robert richterrobert richter2122.83%222.22%
brian roganbrian rogan77.61%111.11%
john levonjohn levon77.61%111.11%
heinz graalfsheinz graalfs77.61%111.11%
mike travismike travis22.17%111.11%
christoph lameterchristoph lameter22.17%111.11%
tejun heotejun heo11.09%111.11%
Total92100.00%9100.00%


/* Public wrapper: log a sample with an explicit task attribution. */
void oprofile_add_ext_hw_sample(unsigned long pc,
				struct pt_regs * const regs,
				unsigned long event, int is_kernel,
				struct task_struct *task)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
}

Contributors

PersonTokensPropCommitsCommitProp
heinz graalfsheinz graalfs40100.00%1100.00%
Total40100.00%1100.00%


/* Public wrapper: log a sample attributed to the current task. */
void oprofile_add_ext_sample(unsigned long pc,
			     struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter3394.29%150.00%
heinz graalfsheinz graalfs25.71%150.00%
Total35100.00%2100.00%


/*
 * Log a sample, deriving pc and kernel/user mode from the register
 * snapshot. A NULL regs yields ESCAPE_CODE as pc, which log_sample()
 * rejects early.
 */
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;	/* This value will not be used */
		pc = ESCAPE_CODE;	/* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
brian roganbrian rogan3751.39%125.00%
phil carmodyphil carmody3244.44%125.00%
heinz graalfsheinz graalfs22.78%125.00%
robert richterrobert richter11.39%125.00%
Total72100.00%4100.00%

/* * Add samples with data to the ring buffer. * * Use oprofile_add_data(&entry, val) to add data and * oprofile_write_commit(&entry) to commit the sample. */
void oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, unsigned long pc, int code, int size) { struct op_sample *sample; int is_kernel = !user_mode(regs); struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); cpu_buf->sample_received++; /* no backtraces for samples with data */ if (op_add_code(cpu_buf, 0, is_kernel, current)) goto fail; sample = op_cpu_buffer_write_reserve(entry, size + 2); if (!sample) goto fail; sample->eip = ESCAPE_CODE; sample->event = 0; /* no flags */ op_cpu_buffer_add_data(entry, code); op_cpu_buffer_add_data(entry, pc); return; fail: entry->event = NULL; cpu_buf->sample_lost_overflow++; }

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter9369.92%666.67%
barry kasindorfbarry kasindorf3727.82%111.11%
christoph lameterchristoph lameter21.50%111.11%
tejun heotejun heo10.75%111.11%
Total133100.00%9100.00%


/* Append one data word to a reserved sample; 0 if the reservation failed. */
int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter32100.00%2100.00%
Total32100.00%2100.00%


/* Append a 64-bit value as two 32-bit data words (low word first). */
int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * the function returns 0 to indicate a too small
		 * buffer, even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter67100.00%1100.00%
Total67100.00%1100.00%


/* Commit a sample started with oprofile_write_reserve(). */
int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}

Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter27100.00%2100.00%
Total27100.00%2100.00%


/* Log a bare pc sample on the current CPU, with no backtrace. */
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
greg banksgreg banks2764.29%114.29%
john levonjohn levon614.29%114.29%
christoph lameterchristoph lameter24.76%114.29%
heinz graalfsheinz graalfs24.76%114.29%
robert richterrobert richter24.76%114.29%
mike travismike travis24.76%114.29%
tejun heotejun heo12.38%114.29%
Total42100.00%7100.00%


/* Record one backtrace entry; aborts the trace on error or escape-code pc. */
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;

fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

Contributors

PersonTokensPropCommitsCommitProp
greg banksgreg banks3653.73%112.50%
robert richterrobert richter2131.34%337.50%
john levonjohn levon57.46%112.50%
christoph lameterchristoph lameter22.99%112.50%
mike travismike travis22.99%112.50%
tejun heotejun heo11.49%112.50%
Total67100.00%8100.00%

/* * This serves to avoid cpu buffer overflow, and makes sure * the task mortuary progresses * * By using schedule_delayed_work_on and then schedule_delayed_work * we guarantee this will stay on the correct cpu */
static void wq_sync_buffer(struct work_struct *work) { struct oprofile_cpu_buffer *b = container_of(work, struct oprofile_cpu_buffer, work.work); if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) { cancel_delayed_work(&b->work); return; } sync_buffer(b->cpu); /* don't re-add the work if we're shutting down */ if (work_enabled) schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE); }

Contributors

PersonTokensPropCommitsCommitProp
john levonjohn levon4051.28%233.33%
chris j argeschris j arges1924.36%116.67%
david howellsdavid howells1417.95%116.67%
anton blanchardanton blanchard45.13%116.67%
robert richterrobert richter11.28%116.67%
Total78100.00%6100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
robert richterrobert richter82349.37%2243.14%
john levonjohn levon25115.06%611.76%
greg banksgreg banks21112.66%11.96%
heinz graalfsheinz graalfs704.20%11.96%
brian roganbrian rogan442.64%11.96%
barry kasindorfbarry kasindorf372.22%11.96%
phil carmodyphil carmody321.92%11.96%
carl lovecarl love301.80%11.96%
philippe eliephilippe elie261.56%11.96%
anton blanchardanton blanchard251.50%23.92%
mike travismike travis231.38%11.96%
tejun heotejun heo211.26%23.92%
chris j argeschris j arges201.20%11.96%
david howellsdavid howells181.08%11.96%
andi kleenandi kleen150.90%11.96%
christoph lameterchristoph lameter100.60%11.96%
andrew mortonandrew morton30.18%11.96%
pekka j enbergpekka j enberg20.12%11.96%
steven rostedtsteven rostedt20.12%11.96%
eric dumazeteric dumazet10.06%11.96%
adrian bunkadrian bunk10.06%11.96%
jesper juhljesper juhl10.06%11.96%
mika kukkonenmika kukkonen10.06%11.96%
Total1667100.00%51100.00%
Directory: drivers/oprofile
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
{% endraw %}