
Release 4.14 arch/powerpc/perf/core-book3s.c

/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>


#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFCUL


struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	/*
	 * The order of the MMCR array is:
	 *  - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
	 *  - 32-bit, MMCR0, MMCR1, MMCR2
	 */
	unsigned long mmcr[4];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int txn_flags;
	int n_txn_start;

	/* BHRB bits */
	u64				bhrb_filter;	/* BHRB HW branch filter */
	unsigned int			bhrb_users;
	void				*bhrb_context;
	struct	perf_branch_stack	bhrb_stack;
	struct	perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
	u64				ic_init;
};

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);


static struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */

static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32


#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE
#define MMCR0_FC56		0
#define MMCR0_PMAO		0
#define MMCR0_EBE		0
#define MMCR0_BHRBA		0
#define MMCR0_PMCC		0
#define MMCR0_PMCC_U6		0

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0


static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { return 0; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               16  100.00%        1     100.00%
Total                        16  100.00%        1     100.00%


static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               15  100.00%        1     100.00%
Total                        15  100.00%        1     100.00%


static inline u32 perf_get_misc_flags(struct pt_regs *regs) { return 0; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               15  100.00%        1     100.00%
Total                        15  100.00%        1     100.00%


static inline void perf_read_regs(struct pt_regs *regs) { regs->result = 0; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               10   55.56%        1      50.00%
Anton Blanchard               8   44.44%        1      50.00%
Total                        18  100.00%        2     100.00%


static inline int perf_intr_is_nmi(struct pt_regs *regs) { return 0; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               15  100.00%        1     100.00%
Total                        15  100.00%        1     100.00%


static inline int siar_valid(struct pt_regs *regs) { return 1; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Sukadev Bhattiprolu          15  100.00%        1     100.00%
Total                        15  100.00%        1     100.00%


static bool is_ebb_event(struct perf_event *event) { return false; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             14  100.00%        1     100.00%
Total                        14  100.00%        1     100.00%


static int ebb_event_check(struct perf_event *event) { return 0; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             14  100.00%        1     100.00%
Total                        14  100.00%        1     100.00%


static void ebb_event_add(struct perf_event *event) { }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             10  100.00%        1     100.00%
Total                        10  100.00%        1     100.00%


static void ebb_switch_out(unsigned long mmcr0) { }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman              9  100.00%        1     100.00%
Total                         9  100.00%        1     100.00%


static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) { return cpuhw->mmcr[0]; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             23  100.00%        2     100.00%
Total                        23  100.00%        2     100.00%


static inline void power_pmu_bhrb_enable(struct perf_event *event) {}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling              11  100.00%        1     100.00%
Total                        11  100.00%        1     100.00%


static inline void power_pmu_bhrb_disable(struct perf_event *event) {}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling              11  100.00%        1     100.00%
Total                        11  100.00%        1     100.00%


static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Peter Zijlstra                8   61.54%        1      33.33%
Michael Neuling               4   30.77%        1      33.33%
Anton Blanchard               1    7.69%        1      33.33%
Total                        13  100.00%        3     100.00%


static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling              11  100.00%        1     100.00%
Total                        11  100.00%        1     100.00%


static void pmao_restore_workaround(bool ebb) { }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman              8  100.00%        1     100.00%
Total                         8  100.00%        1     100.00%


static bool use_ic(u64 event) { return false; }

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Madhavan Srinivasan          12  100.00%        1     100.00%
Total                        12  100.00%        1     100.00%

#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
{
	/*
	 * When we take a performance monitor exception the regs are setup
	 * using perf_read_regs() which overloads some fields, in particular
	 * regs->result to tell us whether to use SIAR.
	 *
	 * However if the regs are from another exception, eg. a syscall, then
	 * they have not been setup using perf_read_regs() and so regs->result
	 * is something random.
	 */
	return ((TRAP(regs) == 0xf00) && regs->result);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             16   57.14%        1      50.00%
Anton Blanchard              12   42.86%        1      50.00%
Total                        28  100.00%        2     100.00%

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}

	return 0;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               63   91.30%        1      50.00%
Michael Ellerman              6    8.70%        1      50.00%
Total                        69  100.00%        2     100.00%

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	bool sdar_valid;

	if (ppmu->flags & PPMU_HAS_SIER)
		sdar_valid = regs->dar & SIER_SDAR_VALID;
	else {
		unsigned long sdsync;

		if (ppmu->flags & PPMU_SIAR_VALID)
			sdsync = POWER7P_MMCRA_SDAR_VALID;
		else if (ppmu->flags & PPMU_ALT_SIPR)
			sdsync = POWER6_MMCRA_SDSYNC;
		else if (ppmu->flags & PPMU_NO_SIAR)
			sdsync = MMCRA_SAMPLE_ENABLE;
		else
			sdsync = MMCRA_SDSYNC;

		sdar_valid = mmcra & sdsync;
	}

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               52   44.07%        1      25.00%
Michael Ellerman             29   24.58%        1      25.00%
Sukadev Bhattiprolu          24   20.34%        1      25.00%
Madhavan Srinivasan          13   11.02%        1      25.00%
Total                       118  100.00%        4     100.00%


static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Anton Blanchard              32   54.24%        1      33.33%
Michael Ellerman             27   45.76%        2      66.67%
Total                        59  100.00%        3     100.00%


static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Anton Blanchard              32   54.24%        1      33.33%
Michael Ellerman             27   45.76%        2      66.67%
Total                        59  100.00%        3     100.00%


static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt       43  100.00%        1     100.00%
Total                        43  100.00%        1     100.00%


static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (is_kernel_addr(siar))
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt       36   38.30%        1      10.00%
Paul Mackerras               24   25.53%        1      10.00%
Michael Ellerman             13   13.83%        3      30.00%
Anton Blanchard               8    8.51%        2      20.00%
Michael Neuling               7    7.45%        1      10.00%
Ingo Molnar                   3    3.19%        1      10.00%
Madhavan Srinivasan           3    3.19%        1      10.00%
Total                        94  100.00%       10     100.00%

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;

	if (ppmu->flags & PPMU_HAS_SIER)
		regs->dar = mfspr(SPRN_SIER);

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if ((ppmu->flags & PPMU_NO_SIAR))
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result = use_siar;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Anton Blanchard              74   53.24%        2      28.57%
Michael Ellerman             33   23.74%        3      42.86%
Paul Mackerras               17   12.23%        1      14.29%
Madhavan Srinivasan          15   10.79%        1      14.29%
Total                       139  100.00%        7     100.00%

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Paul Mackerras               18  100.00%        1     100.00%
Total                        18  100.00%        1     100.00%

/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if (marked) {
		if (ppmu->flags & PPMU_HAS_SIER)
			return regs->dar & SIER_SIAR_VALID;

		if (ppmu->flags & PPMU_SIAR_VALID)
			return mmcra & POWER7P_MMCRA_SIAR_VALID;
	}

	return 1;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Sukadev Bhattiprolu          43   67.19%        1      50.00%
Michael Ellerman             21   32.81%        1      50.00%
Total                        64  100.00%        2     100.00%

/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling               7   63.64%        1      50.00%
Paul Mackerras                4   36.36%        1      50.00%
Total                        11  100.00%        2     100.00%


static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling              52   70.27%        1      25.00%
Paul Mackerras               11   14.86%        1      25.00%
Peter Zijlstra                9   12.16%        1      25.00%
Christoph Lameter             2    2.70%        1      25.00%
Total                        74  100.00%        4     100.00%


static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	WARN_ON_ONCE(!cpuhw->bhrb_users);
	cpuhw->bhrb_users--;
	perf_sched_cb_dec(event->ctx->pmu);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling              50   67.57%        1      20.00%
Peter Zijlstra                9   12.16%        1      20.00%
Paul Mackerras                8   10.81%        1      20.00%
Anshuman Khandual             5    6.76%        1      20.00%
Christoph Lameter             2    2.70%        1      20.00%
Total                        74  100.00%        5     100.00%

/* Called from ctxsw to prevent one process's branch entries to
 * mingle with the other process's entries during context switch.
 */
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (!ppmu->bhrb_nr)
		return;

	if (sched_in)
		power_pmu_bhrb_reset();
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Peter Zijlstra               14   48.28%        1      25.00%
Paul Mackerras                8   27.59%        1      25.00%
Michael Neuling               6   20.69%        1      25.00%
Anton Blanchard               1    3.45%        1      25.00%
Total                        29  100.00%        4     100.00%

/* Calculate the to address for a branch */
static __u64 power_pmu_bhrb_to(u64 addr)
{
	unsigned int instr;
	int ret;
	__u64 target;

	if (is_kernel_addr(addr))
		return branch_target((unsigned int *)addr);

	/* Userspace: need copy instruction here then translate it */
	pagefault_disable();
	ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
	if (ret) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	target = branch_target(&instr);
	if ((!target) || (instr & BRANCH_ABSOLUTE))
		return target;

	/* Translate relative branch target from kernel to user address */
	return target - (unsigned long)&instr + addr;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling             108  100.00%        1     100.00%
Total                       108  100.00%        1     100.00%

/* Processing BHRB entries */
static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: End of valid BHRB entries */
			break;
		else {
			addr = val & BHRB_EA;
			pred = val & BHRB_PREDICTION;

			if (!addr)
				/* invalid entry */
				continue;

			/* Branches are read most recent first (ie. mfbhrb 0 is
			 * the most recent branch).
			 * There are two types of valid entries:
			 * 1) a target entry which is the to address of a
			 *    computed goto like a blr,bctr,btar.  The next
			 *    entry read from the bhrb will be branch
			 *    corresponding to this target (ie. the actual
			 *    blr/bctr/btar instruction).
			 * 2) a from address which is an actual branch.  If a
			 *    target entry proceeds this, then this is the
			 *    matching branch for that target.  If this is not
			 *    following a target entry, then this is a branch
			 *    where the target is given as an immediate field
			 *    in the instruction (ie. an i or b form branch).
			 *    In this case we need to read the instruction from
			 *    memory to determine the target/to address.
			 */
			if (val & BHRB_TARGET) {
				/* Target branches use two entries
				 * (ie. computed gotos/XL form)
				 */
				cpuhw->bhrb_entries[u_index].to = addr;
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;

				/* Get from address in next entry */
				val = read_bhrb(r_index++);
				addr = val & BHRB_EA;
				if (val & BHRB_TARGET) {
					/* Shouldn't have two targets in a row.. Reset index and try again */
					r_index--;
					addr = 0;
				}
				cpuhw->bhrb_entries[u_index].from = addr;
			} else {
				/* Branches to immediate field (ie I or B form) */
				cpuhw->bhrb_entries[u_index].from = addr;
				cpuhw->bhrb_entries[u_index].to =
					power_pmu_bhrb_to(addr);
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;
			}
			u_index++;
		}
	}
	cpuhw->bhrb_stack.nr = u_index;
	return;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling             116   50.43%        2      25.00%
Michael Ellerman             82   35.65%        1      12.50%
Paul Mackerras               30   13.04%        3      37.50%
Anton Blanchard               1    0.43%        1      12.50%
Ingo Molnar                   1    0.43%        1      12.50%
Total                       230  100.00%        8     100.00%


static bool is_ebb_event(struct perf_event *event)
{
	/*
	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
	 * check that the PMU supports EBB, meaning those that don't can still
	 * use bit 63 of the event code for something else if they wish.
	 */
	return (ppmu->flags & PPMU_ARCH_207S) &&
	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             34   97.14%        2      66.67%
Joel Stanley                  1    2.86%        1      33.33%
Total                        35  100.00%        3     100.00%


static int ebb_event_check(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	/* Event and group leader must agree on EBB */
	if (is_ebb_event(leader) != is_ebb_event(event))
		return -EINVAL;

	if (is_ebb_event(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;

		if (!leader->attr.pinned || !leader->attr.exclusive)
			return -EINVAL;

		if (event->attr.freq ||
		    event->attr.inherit ||
		    event->attr.sample_type ||
		    event->attr.sample_period ||
		    event->attr.enable_on_exec)
			return -EINVAL;
	}

	return 0;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman            120  100.00%        2     100.00%
Total                       120  100.00%        2     100.00%


static void ebb_event_add(struct perf_event *event)
{
	if (!is_ebb_event(event) || current->thread.used_ebb)
		return;

	/*
	 * IFF this is the first time we've added an EBB event, set
	 * PMXE in the user MMCR0 so we can detect when it's cleared by
	 * userspace. We need this so that we can context switch while
	 * userspace is in the EBB handler (where PMXE is 0).
	 */
	current->thread.used_ebb = 1;
	current->thread.mmcr0 |= MMCR0_PMXE;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             43  100.00%        1     100.00%
Total                        43  100.00%        1     100.00%


static void ebb_switch_out(unsigned long mmcr0)
{
	if (!(mmcr0 & MMCR0_EBE))
		return;

	current->thread.siar  = mfspr(SPRN_SIAR);
	current->thread.sier  = mfspr(SPRN_SIER);
	current->thread.sdar  = mfspr(SPRN_SDAR);
	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman             70   92.11%        1      25.00%
Michael Neuling               4    5.26%        1      25.00%
Paul Mackerras                2    2.63%        2      50.00%
Total                        76  100.00%        4     100.00%


static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	unsigned long mmcr0 = cpuhw->mmcr[0];

	if (!ebb)
		goto out;

	/* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */
	mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6;

	/*
	 * Add any bits from the user MMCR0, FC or PMAO. This is compatible
	 * with pmao_restore_workaround() because we may add PMAO but we never
	 * clear it here.
	 */
	mmcr0 |= current->thread.mmcr0;

	/*
	 * Be careful not to set PMXE if userspace had it cleared. This is also
	 * compatible with pmao_restore_workaround() because it has already
	 * cleared PMXE and we leave PMAO alone.
	 */
	if (!(current->thread.mmcr0 & MMCR0_PMXE))
		mmcr0 &= ~MMCR0_PMXE;

	mtspr(SPRN_SIAR, current->thread.siar);
	mtspr(SPRN_SIER, current->thread.sier);
	mtspr(SPRN_SDAR, current->thread.sdar);

	/*
	 * Merge the kernel & user values of MMCR2. The semantics we implement
	 * are that the user MMCR2 can set bits, ie. cause counters to freeze,
	 * but not clear bits. If a task wants to be able to clear bits, ie.
	 * unfreeze counters, it should not set exclude_xxx in its events and
	 * instead manage the MMCR2 entirely by itself.
	 */
	mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
out:
	return mmcr0;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman            110   85.94%        4      44.44%
Michael Neuling              12    9.38%        3      33.33%
Paul Mackerras                6    4.69%        2      22.22%
Total                       128  100.00%        9     100.00%


static void pmao_restore_workaround(bool ebb)
{
	unsigned pmcs[6];

	if (!cpu_has_feature(CPU_FTR_PMAO_BUG))
		return;

	/*
	 * On POWER8E there is a hardware defect which affects the PMU context
	 * switch logic, ie. power_pmu_disable/enable().
	 *
	 * When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0
	 * by the hardware. Sometime later the actual PMU exception is
	 * delivered.
	 *
	 * If we context switch, or simply disable/enable, the PMU prior to the
	 * exception arriving, the exception will be lost when we clear PMAO.
	 *
	 * When we reenable the PMU, we will write the saved MMCR0 with PMAO
	 * set, and this _should_ generate an exception.  However because of the
	 * defect no exception is generated when we write PMAO, and we get
	 * stuck with no counters counting but no exception delivered.
	 *
	 * The workaround is to detect this case and tweak the hardware to
	 * create another pending PMU exception.
	 *
	 * We do that by setting up PMC6 (cycles) for an imminent overflow and
	 * enabling the PMU. That causes a new exception to be generated in the
	 * chip, but we don't take it yet because we have interrupts hard
	 * disabled. We then write back the PMU state as we want it to be seen
	 * by the exception handler. When we reenable interrupts the exception
	 * handler will be called and see the correct state.
	 *
	 * The logic is the same for EBB, except that the exception is gated by
	 * us having interrupts hard disabled as well as the fact that we are
	 * not in userspace. The exception is finally delivered when we return
	 * to userspace.
	 */

	/* Only if PMAO is set and PMAO_SYNC is clear */
	if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
		return;

	/* If we're doing EBB, only if BESCR[GE] is set */
	if (ebb && !(current->thread.bescr & BESCR_GE))
		return;

	/*
	 * We are already soft-disabled in power_pmu_enable(). We need to hard
	 * disable to actually prevent the PMU exception from firing.
	 */
	hard_irq_disable();

	/*
	 * This is a bit gross, but we know we're on POWER8E and have 6 PMCs.
	 * Using read/write_pmc() in a for loop adds 12 function calls and
	 * almost doubles our code size.
	 */
	pmcs[0] = mfspr(SPRN_PMC1);
	pmcs[1] = mfspr(SPRN_PMC2);
	pmcs[2] = mfspr(SPRN_PMC3);
	pmcs[3] = mfspr(SPRN_PMC4);
	pmcs[4] = mfspr(SPRN_PMC5);
	pmcs[5] = mfspr(SPRN_PMC6);

	/* Ensure all freeze bits are unset */
	mtspr(SPRN_MMCR2, 0);

	/* Set up PMC6 to overflow in one cycle */
	mtspr(SPRN_PMC6, 0x7FFFFFFE);

	/* Enable exceptions and unfreeze PMC6 */
	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO);

	/* Now we need to refreeze and restore the PMCs */
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO);

	mtspr(SPRN_PMC1, pmcs[0]);
	mtspr(SPRN_PMC2, pmcs[1]);
	mtspr(SPRN_PMC3, pmcs[2]);
	mtspr(SPRN_PMC4, pmcs[3]);
	mtspr(SPRN_PMC5, pmcs[4]);
	mtspr(SPRN_PMC6, pmcs[5]);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Ellerman            224   99.56%        1      50.00%
Madhavan Srinivasan           1    0.44%        1      50.00%
Total                       225  100.00%        2     100.00%


static bool use_ic(u64 event)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
			(event == 0x200f2 || event == 0x300f2))
		return true;

	return false;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Madhavan Srinivasan          32  100.00%        1     100.00%
Total                        32  100.00%        1     100.00%

#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling             112   86.15%        1      50.00%
Paul Mackerras               18   13.85%        1      50.00%
Total                       130  100.00%        2     100.00%

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Michael Neuling             109   89.34%        1      50.00%
Paul Mackerras               13   10.66%        1      50.00%
Total                       122  100.00%        2     100.00%

/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
	unsigned long sdar, sier, flags;
	u32 pmcs[MAX_HWEVENTS];
	int i;

	if (!ppmu) {
		pr_info("Performance monitor hardware not registered.\n");
		return;
	}

	if (!ppmu->n_counter)
		return;

	local_irq_save(flags);

	pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d",
		 smp_processor_id(), ppmu->name, ppmu->n_counter);

	for (i = 0; i < ppmu->n_counter; i++)
		pmcs[i] = read_pmc(i + 1);

	for (; i < MAX_HWEVENTS; i++)
		pmcs[i] = 0xdeadbeef;

	pr_info("PMC1: %08x PMC2: %08x PMC3: %08x PMC4: %08x\n",
		 pmcs[0], pmcs[1], pmcs[2], pmcs[3]);

	if (ppmu->n_counter > 4)
		pr_info("PMC5: %08x PMC6: %08x PMC7: %08x PMC8: %08x\n",
			 pmcs[4], pmcs[5], pmcs[6], pmcs