Release 4.12 drivers/perf/arm_pmu_acpi.c
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
#include <asm/cputype.h>

static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 74 | 89.16% | 1 | 50.00% | 
| Wei Huang | 9 | 10.84% | 1 | 50.00% | 
| Total | 83 | 100.00% | 2 | 100.00% | 
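A side note on the trigger decode above: ACPI_MADT_PERFORMANCE_IRQ_MODE is bit 1 of the GICC flags word, set for edge-triggered and clear for level-triggered interrupts. A minimal standalone sketch of the same decode (not kernel code; the flags value is a hypothetical stand-in):

#include <stdio.h>
#include <stdint.h>

/* Bit 1 of the MADT GICC flags, matching the kernel's
 * ACPI_MADT_PERFORMANCE_IRQ_MODE: set = edge, clear = level. */
#define PERFORMANCE_IRQ_MODE	(1u << 1)

static const char *pmu_irq_trigger(uint32_t gicc_flags)
{
	return (gicc_flags & PERFORMANCE_IRQ_MODE) ? "edge" : "level";
}

int main(void)
{
	uint32_t flags = 0x2;	/* hypothetical GICC flags value */

	printf("PMU interrupt is %s-triggered\n", pmu_irq_trigger(flags));
	return 0;
}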
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}
Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 41 | 100.00% | 1 | 100.00% | 
| Total | 41 | 100.00% | 1 | 100.00% | 
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		per_cpu(pmu_irqs, cpu) = irq;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 140 | 100.00% | 1 | 100.00% | 
| Total | 140 | 100.00% | 1 | 100.00% | 
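The "blat all copies" cleanup above is easiest to see with a concrete PPI case: every CPU maps the same GSI to the same Linux IRQ, so the error path must unregister it once and then clear every per-CPU slot holding it. A minimal userspace sketch of that pattern, with plain arrays standing in for the per-CPU variables and a hypothetical PPI value:

#include <stdio.h>

#define NR_CPUS 4

/* All CPUs share one PPI, so every slot holds the same IRQ. */
static int pmu_irqs[NR_CPUS] = { 23, 23, 23, 23 };

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int irq = pmu_irqs[cpu];

		if (!irq)
			continue;

		/* Unregister once... */
		printf("unregistering irq %d (found via CPU%d)\n", irq, cpu);

		/* ...then zero every copy so it is never unregistered again. */
		for (int irq_cpu = 0; irq_cpu < NR_CPUS; irq_cpu++) {
			if (pmu_irqs[irq_cpu] == irq)
				pmu_irqs[irq_cpu] = 0;
		}
	}
	return 0;
}

Run as written, this prints a single "unregistering" line: the dedup loop guarantees the shared IRQ is torn down exactly once.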
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}
Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 87 | 100.00% | 1 | 100.00% | 
| Total | 87 | 100.00% | 1 | 100.00% | 
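The MIDR match above means all CPUs reporting the same MIDR_EL1 value share a single struct arm_pmu, so a two-cluster big.LITTLE system ends up with two PMUs. A standalone sketch of the grouping effect (the MIDR values below are hypothetical stand-ins, not real part numbers):

#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	/* Hypothetical per-CPU MIDR values: two clusters of two CPUs. */
	unsigned long midr[NR_CPUS] = { 0xd03, 0xd03, 0xd08, 0xd08 };
	unsigned long pmu_cpuid[NR_CPUS] = { 0 };	/* allocated "PMUs" */
	int nr_pmus = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int i;

		/* Reuse an existing PMU whose cpuid matches, as above. */
		for (i = 0; i < nr_pmus; i++)
			if (pmu_cpuid[i] == midr[cpu])
				break;
		if (i == nr_pmus)
			pmu_cpuid[nr_pmus++] = midr[cpu];

		printf("CPU%d -> PMU%d\n", cpu, i);
	}
	printf("%d PMUs allocated\n", nr_pmus);
	return 0;
}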
/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	per_cpu(probed_pmus, cpu) = pmu;

	/*
	 * Log and request the IRQ so the core arm_pmu code can manage it.  In
	 * some situations (e.g. mismatched PPIs), we may fail to request the
	 * IRQ. However, it may be too late for us to do anything about it.
	 * The common ARM PMU code will log a warning in this case.
	 */
	hw_events = pmu->hw_events;
	per_cpu(hw_events->irq, cpu) = irq;
	armpmu_request_irq(pmu, cpu);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}
Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 108 | 100.00% | 1 | 100.00% | 
| Total | 108 | 100.00% | 1 | 100.00% | 
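The "must run before the common arm_pmu hotplug logic" requirement in the comment above is enforced purely by position in enum cpuhp_state: starting callbacks run in enum order on CPU bring-up. A paraphrased excerpt of the relevant ordering in include/linux/cpuhotplug.h (surrounding states elided):

enum cpuhp_state {
	/* ... earlier states ... */
	CPUHP_AP_PERF_ARM_ACPI_STARTING,	/* this file's callback */
	CPUHP_AP_PERF_ARM_STARTING,		/* common arm_pmu callback */
	/* ... later states ... */
};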
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	if (acpi_disabled)
		return 0;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			return ret;
		}
	}

	return 0;
}
Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 161 | 100.00% | 1 | 100.00% | 
| Total | 161 | 100.00% | 1 | 100.00% | 
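For context on who calls this: in this release the arm64 PMUv3 driver passes its init function in when booting with ACPI, roughly as follows (paraphrased from arch/arm64/kernel/perf_event.c; see that file for the real definitions of armv8_pmu_driver and armv8_pmuv3_init):

static int armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
/* device_initcall() runs after the subsys_initcall() at the bottom of
 * this file, so the IRQs are parsed and the hotplug callback installed
 * before the driver probes. */
device_initcall(armv8_pmu_driver_init)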
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	/*
	 * We can't request IRQs yet, since we don't know the cookie value
	 * until we know which CPUs share the same logical PMU. We'll handle
	 * that in arm_pmu_acpi_cpu_starting().
	 */
	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 47 | 100.00% | 1 | 100.00% | 
| Total | 47 | 100.00% | 1 | 100.00% | 
subsys_initcall(arm_pmu_acpi_init)
Overall Contributors
| Person | Tokens | Token Proportion | Commits | Commit Proportion |
| Mark Rutland | 700 | 98.73% | 1 | 50.00% | 
| Wei Huang | 9 | 1.27% | 1 | 50.00% | 
| Total | 709 | 100.00% | 2 | 100.00% | 