cregit-Linux how code gets into the kernel

Release 4.14 arch/x86/oprofile/op_model_ppro.c

/*
 * @file op_model_ppro.c
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Andi Kleen
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"


/* number of hardware counters in use; re-sized at init time for arch perfmon */
static int num_counters = 2;

/* counter bit width; may be widened from CPUID 0xa when arch perfmon exists */
static int counter_width = 32;


/* event-select MSR bits that must remain clear (upper half + bit 21) */
#define MSR_PPRO_EVENTSEL_RESERVED	((0xFFFFFFFFULL<<32)|(1ULL<<21))


/* per-counter reload value; 0 marks a counter as inactive */
static u64 reset_value[OP_MAX_COUNTER];


/*
 * Drop the NMI reservations held on every counter/event-select MSR
 * pair. Counters whose address was never filled in are skipped.
 */
static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int counter;

	for (counter = 0; counter < num_counters; ++counter) {
		if (!msrs->counters[counter].addr)
			continue;
		release_perfctr_nmi(MSR_P6_PERFCTR0 + counter);
		release_evntsel_nmi(MSR_P6_EVNTSEL0 + counter);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter58100.00%1100.00%
Total58100.00%1100.00%


static int ppro_fill_in_addresses(struct op_msrs * const msrs) { int i; for (i = 0; i < num_counters; i++) { if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) goto fail; if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) { release_perfctr_nmi(MSR_P6_PERFCTR0 + i); goto fail; } /* both registers must be reserved */ msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; continue; fail: if (!counter_config[i].enabled) continue; op_x86_warn_reserved(i); ppro_shutdown(msrs); return -EBUSY; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter5947.97%233.33%
Don Zickus3226.02%116.67%
John Levon3125.20%233.33%
Andi Kleen10.81%116.67%
Total123100.00%6100.00%


static void ppro_setup_ctrs(struct op_x86_model_spec const *model, struct op_msrs const * const msrs) { u64 val; int i; if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) { union cpuid10_eax eax; eax.full = cpuid_eax(0xa); /* * For Core2 (family 6, model 15), don't reset the * counter width: */ if (!(eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && __this_cpu_read(cpu_info.x86_model) == 15)) { if (counter_width < eax.split.bit_width) counter_width = eax.split.bit_width; } } /* clear all counters */ for (i = 0; i < num_counters; ++i) { if (!msrs->controls[i].addr) continue; rdmsrl(msrs->controls[i].addr, val); if (val & ARCH_PERFMON_EVENTSEL_ENABLE) op_x86_warn_in_use(i); val &= model->reserved; wrmsrl(msrs->controls[i].addr, val); /* * avoid a false detection of ctr overflows in NMI * * handler */ wrmsrl(msrs->counters[i].addr, -1LL); } /* enable active counters */ for (i = 0; i < num_counters; ++i) { if (counter_config[i].enabled && msrs->counters[i].addr) { reset_value[i] = counter_config[i].count; wrmsrl(msrs->counters[i].addr, -reset_value[i]); rdmsrl(msrs->controls[i].addr, val); val &= model->reserved; val |= op_x86_get_ctrl(model, &counter_config[i]); wrmsrl(msrs->controls[i].addr, val); } else { reset_value[i] = 0; } } }

Contributors

PersonTokensPropCommitsCommitProp
John Levon10633.97%214.29%
Robert Richter9229.49%750.00%
Andi Kleen5718.27%17.14%
Tim Blechmann268.33%17.14%
Don Zickus196.09%17.14%
Tejun Heo82.56%17.14%
Borislav Petkov41.28%17.14%
Total312100.00%14100.00%


static int ppro_check_ctrs(struct pt_regs * const regs, struct op_msrs const * const msrs) { u64 val; int i; for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; rdmsrl(msrs->counters[i].addr, val); if (val & (1ULL << (counter_width - 1))) continue; oprofile_add_sample(regs, i); wrmsrl(msrs->counters[i].addr, -reset_value[i]); } /* Only P6 based Pentium M need to re-unmask the apic vector but it * doesn't hurt other P6 variant */ apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); /* We can't work out if we really handled an interrupt. We * might have caught a *second* counter just after overflowing * the interrupt for this counter then arrives * and we don't find a counter that's overflowed, so we * would return 0 and get dazed + confused. Instead we always * assume we found an overflow. This sucks. */ return 1; }

Contributors

PersonTokensPropCommitsCommitProp
John Levon5948.76%225.00%
Andi Kleen2319.01%225.00%
Andrew Morton1411.57%112.50%
Robert Richter119.09%112.50%
Don Zickus97.44%112.50%
Greg Banks54.13%112.50%
Total121100.00%8100.00%


/* Set the enable bit in every active counter's event-select MSR. */
static void ppro_start(struct op_msrs const * const msrs)
{
	u64 ctrl;
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->controls[i].addr, ctrl);
		ctrl |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, ctrl);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter2736.00%342.86%
John Levon2128.00%114.29%
Arun Sharma1824.00%114.29%
Don Zickus810.67%114.29%
Andi Kleen11.33%114.29%
Total75100.00%7100.00%


/* Clear the enable bit in every active counter's event-select MSR. */
static void ppro_stop(struct op_msrs const * const msrs)
{
	u64 ctrl;
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			rdmsrl(msrs->controls[i].addr, ctrl);
			ctrl &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
			wrmsrl(msrs->controls[i].addr, ctrl);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter2836.84%342.86%
John Levon2127.63%114.29%
Arun Sharma1925.00%114.29%
Don Zickus79.21%114.29%
Andi Kleen11.32%114.29%
Total76100.00%7100.00%

struct op_x86_model_spec op_ppro_spec = { .num_counters = 2, .num_controls = 2, .reserved = MSR_PPRO_EVENTSEL_RESERVED, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, .start = &ppro_start, .stop = &ppro_stop, .shutdown = &ppro_shutdown }; /* * Architectural performance monitoring. * * Newer Intel CPUs (Core1+) have support for architectural * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details. * The advantage of this is that it can be done without knowing about * the specific CPU. */
/*
 * Query CPUID 0xa for the architectural perfmon counter count and
 * propagate it (clamped to OP_MAX_COUNTER) into both the module-wide
 * num_counters and the arch-perfmon model spec.
 */
static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
	if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 15) {
		eax.split.version_id = 2;
		eax.split.num_counters = 2;
		eax.split.bit_width = 40;
	}

	num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen8586.73%120.00%
Robert Richter1111.22%360.00%
Borislav Petkov22.04%120.00%
Total98100.00%5100.00%


/* Model init hook: size the counter arrays from CPUID; never fails. */
static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Robert Richter17100.00%1100.00%
Total17100.00%1100.00%

struct op_x86_model_spec op_arch_perfmon_spec = { .reserved = MSR_PPRO_EVENTSEL_RESERVED, .init = &arch_perfmon_init, /* num_counters/num_controls filled in at runtime */ .fill_in_addresses = &ppro_fill_in_addresses, /* user space does the cpuid check for available events */ .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, .start = &ppro_start, .stop = &ppro_stop, .shutdown = &ppro_shutdown };

Overall Contributors

PersonTokensPropCommitsCommitProp
Robert Richter32230.93%1446.67%
John Levon30229.01%413.33%
Andi Kleen23122.19%26.67%
Don Zickus848.07%26.67%
Arun Sharma373.55%13.33%
Tim Blechmann262.50%13.33%
Andrew Morton171.63%13.33%
Tejun Heo80.77%13.33%
Borislav Petkov60.58%26.67%
Greg Banks50.48%13.33%
Maarten Lankhorst30.29%13.33%
Total1041100.00%30100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.