Release 4.11: drivers/perf/arm_pmu.c
#undef DEBUG
/*
* ARM performance counter support.
*
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
*
* This code is based on the sparc64 perf event code, which is in turn based
* on the x86 code.
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u64 config)
{
unsigned int cache_type, cache_op, cache_result, ret;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED)
return -ENOENT;
return ret;
}
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
int mapping;
if (config >= PERF_COUNT_HW_MAX)
return -EINVAL;
mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
return (int)(config & raw_event_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Will Deacon | 19 | 86.36% | 1 | 50.00% |
Mark Rutland | 3 | 13.64% | 1 | 50.00% |
Total | 22 | 100.00% | 2 | 100.00% |
int
armpmu_map_event(struct perf_event *event,
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask)
{
u64 config = event->attr.config;
int type = event->attr.type;
if (type == event->pmu->type)
return armpmu_map_raw_event(raw_event_mask, config);
switch (type) {
case PERF_TYPE_HARDWARE:
return armpmu_map_hw_event(event_map, config);
case PERF_TYPE_HW_CACHE:
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
}
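For PERF_TYPE_HW_CACHE events, the perf ABI packs the cache type, operation, and result into the low three bytes of attr.config, which is exactly the layout armpmu_map_cache_event() decodes above. A minimal userspace sketch of that encoding, using only the standard uapi constants (the helper name is illustrative):

#include <linux/perf_event.h>	/* uapi PERF_COUNT_HW_CACHE_* constants */
#include <stdint.h>

/* Encode an L1D read-miss event: type in byte 0, op in byte 1,
 * result in byte 2. */
static uint64_t l1d_read_miss_config(void)
{
	return  (uint64_t)PERF_COUNT_HW_CACHE_L1D |
	       ((uint64_t)PERF_COUNT_HW_CACHE_OP_READ << 8) |
	       ((uint64_t)PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
}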
int armpmu_event_set_period(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Limit the maximum period to prevent the counter value
* from overtaking the one we are about to program. In
* effect we are reducing max_period to account for
* interrupt latency (and we are being very conservative).
*/
if (left > (armpmu->max_period >> 1))
left = armpmu->max_period >> 1;
local64_set(&hwc->prev_count, (u64)-left);
armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
perf_event_update_userpage(event);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 152 | 81.72% | 1 | 20.00% |
Mark Rutland | 12 | 6.45% | 1 | 20.00% |
Sudeep KarkadaNagesha | 12 | 6.45% | 1 | 20.00% |
Daniel R Thompson | 6 | 3.23% | 1 | 20.00% |
Peter Zijlstra | 4 | 2.15% | 1 | 20.00% |
Total | 186 | 100.00% | 5 | 100.00% |
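Programming the counter with (u64)(-left) exploits two's complement wrap-around: a 32-bit counter started at -left overflows, and raises its interrupt, after exactly `left` increments. A standalone sketch of the arithmetic (the helper is illustrative, not part of the driver):

#include <stdint.h>

/* Start value for a 32-bit counter that should overflow after
 * "left" more events, e.g. left = 1000 -> 0xfffffc18. */
static uint32_t counter_start_for(uint32_t left)
{
	return (uint32_t)0 - left;	/* two's complement wrap */
}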
u64 armpmu_event_update(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = armpmu->read_counter(event);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 73 | 64.04% | 1 | 16.67% |
Will Deacon | 13 | 11.40% | 2 | 33.33% |
Sudeep KarkadaNagesha | 12 | 10.53% | 1 | 16.67% |
Mark Rutland | 12 | 10.53% | 1 | 16.67% |
Peter Zijlstra | 4 | 3.51% | 1 | 16.67% |
Total | 114 | 100.00% | 6 | 100.00% |
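Masking the difference with max_period makes the update wrap-safe: even if the hardware counter overflowed between the two reads, unsigned subtraction modulo the counter width recovers the true event count. A minimal sketch, assuming a 32-bit counter (max_period = 0xffffffff):

#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t curr, uint64_t max_period)
{
	return (curr - prev) & max_period;
}

/* e.g. prev = 0xfffffff0, curr = 0x10:
 * (0x10 - 0xfffffff0) & 0xffffffff == 0x20, i.e. 32 events counted
 * across the wrap. */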
static void
armpmu_read(struct perf_event *event)
{
armpmu_event_update(event);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 12 | 75.00% | 1 | 50.00% |
Peter Zijlstra | 4 | 25.00% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static void
armpmu_stop(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* ARM PMU always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
*/
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(event);
armpmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 44 | 62.86% | 1 | 25.00% |
Jamie Iles | 13 | 18.57% | 1 | 25.00% |
Mark Rutland | 12 | 17.14% | 1 | 25.00% |
Sudeep KarkadaNagesha | 1 | 1.43% | 1 | 25.00% |
Total | 70 | 100.00% | 4 | 100.00% |
static void armpmu_start(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* ARM PMU always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
*/
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/*
* Set the period again. Some counters can't be stopped, so when we
* were stopped we simply disabled the IRQ source and the counter
* may have been left counting. If we don't do this step then we may
* get an interrupt too soon or *way* too late if the overflow has
* happened since disabling.
*/
armpmu_event_set_period(event);
armpmu->enable(event);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 34 | 45.95% | 1 | 25.00% |
Jamie Iles | 27 | 36.49% | 1 | 25.00% |
Mark Rutland | 12 | 16.22% | 1 | 25.00% |
Sudeep KarkadaNagesha | 1 | 1.35% | 1 | 25.00% |
Total | 74 | 100.00% | 4 | 100.00% |
static void
armpmu_del(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
if (armpmu->clear_event_idx)
armpmu->clear_event_idx(hw_events, event);
perf_event_update_userpage(event);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 34 | 34.00% | 1 | 14.29% |
Jamie Iles | 29 | 29.00% | 1 | 14.29% |
Mark Rutland | 22 | 22.00% | 4 | 57.14% |
Stephen Boyd | 15 | 15.00% | 1 | 14.29% |
Total | 100 | 100.00% | 7 | 100.00% |
static int
armpmu_add(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
/* An event following a process won't be stopped earlier */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return -ENOENT;
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, event);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then make
* sure it is disabled.
*/
event->hw.idx = idx;
armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
armpmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 97 | 56.07% | 1 | 10.00% |
Mark Rutland | 41 | 23.70% | 5 | 50.00% |
Peter Zijlstra | 33 | 19.08% | 3 | 30.00% |
Sudeep KarkadaNagesha | 2 | 1.16% | 1 | 10.00% |
Total | 173 | 100.00% | 10 | 100.00% |
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu;
if (is_software_event(event))
return 1;
/*
* Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
* core perf code won't check that the pmu->ctx == leader->ctx
* until after pmu->event_init(event).
*/
if (event->pmu != pmu)
return 0;
if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, event) >= 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Will Deacon | 32 | 32.65% | 3 | 37.50% |
Jamie Iles | 32 | 32.65% | 1 | 12.50% |
Suzuki K. Poulose | 26 | 26.53% | 1 | 12.50% |
Mark Rutland | 7 | 7.14% | 2 | 25.00% |
Sudeep KarkadaNagesha | 1 | 1.02% | 1 | 12.50% |
Total | 98 | 100.00% | 8 | 100.00% |
static int
validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
/*
* Initialise the fake PMU. We only need to populate the
* used_mask for the purposes of validation.
*/
memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 88 | 76.52% | 1 | 16.67% |
Suzuki K. Poulose | 12 | 10.43% | 1 | 16.67% |
Mark Rutland | 7 | 6.09% | 2 | 33.33% |
Will Deacon | 5 | 4.35% | 1 | 16.67% |
Peter Zijlstra | 3 | 2.61% | 1 | 16.67% |
Total | 115 | 100.00% | 6 | 100.00% |
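The dry run works because get_event_idx() implementations allocate counters solely from used_mask, so a zeroed fake bitmap is enough to check that the whole group fits on the hardware at once. A hypothetical allocator in that style (EXAMPLE_NUM_COUNTERS and the linear scan are illustrative, not taken from any real back-end):

#include <linux/bitops.h>
#include <linux/perf/arm_pmu.h>

#define EXAMPLE_NUM_COUNTERS 6	/* hypothetical counter count */

static int example_get_event_idx(struct pmu_hw_events *hw_events,
				 struct perf_event *event)
{
	int idx;

	/* claim the first free counter; fails once the (fake or real)
	 * PMU is fully allocated */
	for (idx = 0; idx < EXAMPLE_NUM_COUNTERS; idx++)
		if (!test_and_set_bit(idx, hw_events->used_mask))
			return idx;

	return -EAGAIN;
}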
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
/*
* we request the IRQ with a (possibly percpu) struct arm_pmu**, but
* the handlers expect a struct arm_pmu*. The percpu_irq framework will
* do any necessary shifting; we just need to perform the first
* dereference.
*/
armpmu = *(void **)dev;
plat_device = armpmu->plat_device;
plat = dev_get_platdata(&plat_device->dev);
start_clock = sched_clock();
if (plat && plat->handle_irq)
ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
else
ret = armpmu->handle_irq(irq, armpmu);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Will Deacon | 33 | 27.97% | 1 | 14.29% |
Rabin Vincent | 33 | 27.97% | 1 | 14.29% |
Stephen Boyd | 19 | 16.10% | 1 | 14.29% |
Sudeep KarkadaNagesha | 18 | 15.25% | 1 | 14.29% |
Mark Rutland | 15 | 12.71% | 3 | 42.86% |
Total | 118 | 100.00% | 7 | 100.00% |
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
armpmu->free_irq(armpmu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Will Deacon | 7 | 38.89% | 1 | 16.67% |
Mark Rutland | 6 | 33.33% | 2 | 33.33% |
Sudeep KarkadaNagesha | 4 | 22.22% | 2 | 33.33% |
Jamie Iles | 1 | 5.56% | 1 | 16.67% |
Total | 18 | 100.00% | 6 | 100.00% |
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
if (err) {
armpmu_release_hardware(armpmu);
return err;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 20 | 50.00% | 1 | 14.29% |
Mark Rutland | 11 | 27.50% | 3 | 42.86% |
Will Deacon | 6 | 15.00% | 1 | 14.29% |
Sudeep KarkadaNagesha | 3 | 7.50% | 2 | 28.57% |
Total | 40 | 100.00% | 7 | 100.00% |
static void
hw_perf_event_destroy(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
atomic_t *active_events = &armpmu->active_events;
struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
armpmu_release_hardware(armpmu);
mutex_unlock(pmu_reserve_mutex);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 34 | 53.97% | 2 | 66.67% |
Jamie Iles | 29 | 46.03% | 1 | 33.33% |
Total | 63 | 100.00% | 3 | 100.00% |
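Paired with the atomic_inc_not_zero() in armpmu_event_init() below, this means the reserve mutex is taken only on the 0 -> 1 and 1 -> 0 transitions, so event creation and teardown stay lock-free while the hardware is already reserved. A generic sketch of the pattern (all names are illustrative):

#include <linux/atomic.h>
#include <linux/mutex.h>

static atomic_t users = ATOMIC_INIT(0);
static DEFINE_MUTEX(reserve_lock);

static int resource_get(int (*reserve)(void))
{
	int err = 0;

	if (atomic_inc_not_zero(&users))
		return 0;		/* fast path: already reserved */

	mutex_lock(&reserve_lock);
	if (atomic_read(&users) == 0)
		err = reserve();	/* first user reserves the hardware */
	if (!err)
		atomic_inc(&users);
	mutex_unlock(&reserve_lock);

	return err;
}

static void resource_put(void (*release)(void))
{
	/* takes the mutex only when this is the last reference */
	if (atomic_dec_and_mutex_lock(&users, &reserve_lock)) {
		release();
		mutex_unlock(&reserve_lock);
	}
}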
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
return attr->exclude_idle || attr->exclude_user ||
attr->exclude_kernel || attr->exclude_hv;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Will Deacon | 26 | 92.86% | 1 | 50.00% |
Jamie Iles | 2 | 7.14% | 1 | 50.00% |
Total | 28 | 100.00% | 2 | 100.00% |
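From userspace, setting any of these attribute bits triggers the check; on a PMU back-end without set_event_filter(), __hw_perf_event_init() below then rejects the event with -EOPNOTSUPP. A minimal attribute set-up using only uapi fields (the helper name is illustrative):

#include <linux/perf_event.h>
#include <string.h>

static void init_user_only_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_INSTRUCTIONS;
	attr->exclude_kernel = 1;	/* count user mode only */
	attr->exclude_hv = 1;
}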
static int
__hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int mapping;
mapping = armpmu->map_event(event);
if (mapping < 0) {
pr_debug("event %x:%llx not supported\n", event->attr.type,
event->attr.config);
return mapping;
}
/*
* We don't assign an index until we actually place the event onto
* hardware. Use -1 to signify that we haven't decided where to put it
* yet. For SMP systems, each core has its own PMU so we can't do any
* clever allocation or constraints checking at this point.
*/
hwc->idx = -1;
hwc->config_base = 0;
hwc->config = 0;
hwc->event_base = 0;
/*
* Check whether we need to exclude the counter from certain modes.
*/
if ((!armpmu->set_event_filter ||
armpmu->set_event_filter(hwc, &event->attr)) &&
event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
return -EOPNOTSUPP;
}
/*
* Store the event encoding into the config_base field.
*/
hwc->config_base |= (unsigned long)mapping;
if (!is_sampling_event(event)) {
/*
* For non-sampling runs, limit the sample_period to half
* of the counter width. That way, the new counter value
* is far less likely to overtake the previous one unless
* you have some serious IRQ latency issues.
*/
hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
if (event->group_leader != event) {
if (validate_group(event) != 0)
return -EINVAL;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 143 | 65.30% | 1 | 11.11% |
Will Deacon | 52 | 23.74% | 3 | 33.33% |
Mark Rutland | 19 | 8.68% | 3 | 33.33% |
Vince Weaver | 4 | 1.83% | 1 | 11.11% |
Peter Zijlstra | 1 | 0.46% | 1 | 11.11% |
Total | 219 | 100.00% | 9 | 100.00% |
static int armpmu_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
int err = 0;
atomic_t *active_events = &armpmu->active_events;
/*
* Reject CPU-affine events for CPUs that are of a different class to
* that which this PMU handles. Process-following events (where
* event->cpu == -1) can be migrated between CPUs, and thus we have to
* reject them later (in armpmu_add) if they're scheduled on a
* different class of CPU.
*/
if (event->cpu != -1 &&
!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
return -ENOENT;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
if (armpmu->map_event(event) == -ENOENT)
return -ENOENT;
event->destroy = hw_perf_event_destroy;
if (!atomic_inc_not_zero(active_events)) {
mutex_lock(&armpmu->reserve_mutex);
if (atomic_read(active_events) == 0)
err = armpmu_reserve_hardware(armpmu);
if (!err)
atomic_inc(active_events);
mutex_unlock(&armpmu->reserve_mutex);
}
if (err)
return err;
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamie Iles | 88 | 50.00% | 1 | 14.29% |
Mark Rutland | 66 | 37.50% | 4 | 57.14% |
Stéphane Eranian | 12 | 6.82% | 1 | 14.29% |
Peter Zijlstra | 10 | 5.68% | 1 | 14.29% |
Total | 176 | 100.00% | 7 | 100.00% |
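This init path is what an ordinary perf_event_open(2) call exercises. A userspace sketch that opens a cycle counter on the calling thread (error handling elided; the helper name is illustrative):

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cycle_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;	/* enable later via PERF_EVENT_IOC_ENABLE */

	/* pid = 0: this thread; cpu = -1: follow it across CPUs */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}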
static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return;
if (enabled)
armpmu->start(armpmu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 43 | 58.11% | 6 | 50.00% |
Jamie Iles | 18 | 24.32% | 1 | 8.33% |
Peter Zijlstra | 6 | 8.11% | 3 | 25.00% |
Will Deacon | 4 | 5.41% | 1 | 8.33% |
Sudeep KarkadaNagesha | 3 | 4.05% | 1 | 8.33% |
Total | 74 | 100.00% | 12 | 100.00% |
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return;
armpmu->stop(armpmu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 26 | 59.09% | 2 | 33.33% |
Jamie Iles | 9 | 20.45% | 1 | 16.67% |
Peter Zijlstra | 6 | 13.64% | 2 | 33.33% |
Sudeep KarkadaNagesha | 3 | 6.82% | 1 | 16.67% |
Total | 44 | 100.00% | 6 | 100.00% |
/*
* In heterogeneous systems, events are specific to a particular
* microarchitecture, and aren't suitable for another. Thus, only match CPUs of
* the same microarchitecture.
*/
static int armpmu_filter_match(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
unsigned int cpu = smp_processor_id();
return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
static ssize_t armpmu_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 46 | 100.00% | 1 | 100.00% |
Total | 46 | 100.00% | 1 | 100.00% |
static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);
static struct attribute *armpmu_common_attrs[] = {
&dev_attr_cpus.attr,
NULL,
};
static struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
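Since PMUs register on the event_source bus, this attribute is visible from userspace as /sys/bus/event_source/devices/<pmu>/cpus, where the device name depends on the platform back-end.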
static void armpmu_init(struct arm_pmu *armpmu)
{
atomic_set(&armpmu->active_events, 0);
mutex_init(&armpmu->reserve_mutex);
armpmu->pmu = (struct pmu) {
.pmu_enable = armpmu_enable,
.pmu_disable = armpmu_disable,
.event_init = armpmu_event_init,
.add = armpmu_add,
.del = armpmu_del,
.start = armpmu_start,
.stop = armpmu_stop,
.read = armpmu_read,
.filter_match = armpmu_filter_match,
.attr_groups = armpmu->attr_groups,
};
armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
&armpmu_common_attr_group;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 57 | 56.44% | 4 | 57.14% |
Peter Zijlstra | 43 | 42.57% | 2 | 28.57% |
Jamie Iles | 1 | 0.99% | 1 | 14.29% |
Total | 101 | 100.00% | 7 | 100.00% |
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;
/*
* Despite the names, these two functions are CPU-specific and are used
* by the OProfile/perf code.
*/
const char *perf_pmu_name(void)
{
if (!__oprofile_cpu_pmu)
return NULL;
return __oprofile_cpu_pmu->name;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
int max_events = 0;
if (__oprofile_cpu_pmu != NULL)
max_events = __oprofile_cpu_pmu->num_events;
return max_events;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(perf_num_counters);
static void cpu_pmu_enable_percpu_irq(void *data)
{
int irq = *(int *)data;
enable_percpu_irq(irq, IRQ_TYPE_NONE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
static void cpu_pmu_disable_percpu_irq(void *data)
{
int irq = *(int *)data;
disable_percpu_irq(irq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
irqs = min(pmu_device->num_resources, num_possible_cpus());
irq = platform_get_irq(pmu_device, 0);
if (irq > 0 && irq_is_percpu(irq)) {
on_each_cpu_mask(&cpu_pmu->supported_cpus,
cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &hw_events->percpu_pmu);
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
if (cpu_pmu->irq_affinity)
cpu = cpu_pmu->irq_affinity[i];
if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq > 0)
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 170 | 95.51% | 1 | 33.33% |
Marc Zyngier | 8 | 4.49% | 2 | 66.67% |
Total | 178 | 100.00% | 3 | 100.00% |
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
int i, err, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
if (!pmu_device)
return -ENODEV;
irqs = min(pmu_device->num_resources, num_possible_cpus());
if (irqs < 1) {
pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
return 0;
}
irq = platform_get_irq(pmu_device, 0);
if (irq > 0 && irq_is_percpu(irq)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
return err;
}
on_each_cpu_mask(&cpu_pmu->supported_cpus,
cpu_pmu_enable_percpu_irq, &irq, 1);
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
err = 0;
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
if (cpu_pmu->irq_affinity)
cpu = cpu_pmu->irq_affinity[i];
/*
* If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, cpu);
continue;
}
err = request_irq(irq, handler,
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
per_cpu_ptr(&hw_events->percpu_pmu, cpu));
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
return err;
}
cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
}
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 282 | 97.58% | 1 | 33.33% |
Marc Zyngier | 7 | 2.42% | 2 | 66.67% |
Total | 289 | 100.00% | 3 | 100.00% |
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
if (!cpumask_test_cpu