Release 4.14 tools/perf/builtin-stat.c
/*
* builtin-stat.c
*
* Builtin stat command: give a precise summary of performance
* counters for any workload, CPU or specific PID.
*
* Sample output:
$ perf stat ./hackbench 10
Time: 0.118
Performance counter stats for './hackbench 10':
1708.761321 task-clock # 11.037 CPUs utilized
41,190 context-switches # 0.024 M/sec
6,735 CPU-migrations # 0.004 M/sec
17,318 page-faults # 0.010 M/sec
5,205,202,243 cycles # 3.046 GHz
3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
2,603,501,247 instructions # 0.50 insns per cycle
# 1.48 stalled cycles per insn
484,357,498 branches # 283.455 M/sec
6,388,934 branch-misses # 1.32% of all branches
0.154822978 seconds time elapsed
*
* Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
*
* Improvements and fixes by:
*
* Arjan van de Ven <arjan@linux.intel.com>
* Yanmin Zhang <yanmin.zhang@intel.com>
* Wu Fengguang <fengguang.wu@intel.com>
* Mike Galbraith <efault@gmx.de>
* Paul Mackerras <paulus@samba.org>
* Jaswinder Singh Rajput <jaswinder@kernel.org>
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "perf.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/group.h"
#include "util/string2.h"
#include "asm/bug.h"
#include <linux/time64.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include "sane_ctype.h"
#define DEFAULT_SEPARATOR " "
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
static void print_counters(struct timespec *ts, int argc, const char **argv);
/* Default events used for perf stat -T */
static const char *transaction_attrs = {
"task-clock,"
"{"
"instructions,"
"cycles,"
"cpu/cycles-t/,"
"cpu/tx-start/,"
"cpu/el-start/,"
"cpu/cycles-ct/"
"}"
};
/* More limited version when the CPU does not have all events. */
static const char *transaction_limited_attrs = {
"task-clock,"
"{"
"instructions,"
"cycles,"
"cpu/cycles-t/,"
"cpu/tx-start/"
"}"
};
static const char *topdown_attrs[] = {
"topdown-total-slots",
"topdown-slots-retired",
"topdown-recovery-bubbles",
"topdown-fetch-bubbles",
"topdown-slots-issued",
NULL,
};
static const char *smi_cost_attrs = {
"{"
"msr/aperf/,"
"msr/smi/,"
"cycles"
"}"
};
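/*
 * Usage note (illustrative, not from this file): the attribute
 * strings above are what the corresponding perf stat options expand
 * to, using the "{...}" event-group syntax, e.g.:
 *
 *   $ perf stat -T ./workload            # transaction_attrs
 *   $ perf stat --topdown -a sleep 1     # topdown_attrs
 *   $ perf stat --smi-cost -a sleep 1    # smi_cost_attrs
 */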
static struct perf_evlist *evsel_list;
static struct target target = {
.uid = UINT_MAX,
};
typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
static int run_count = 1;
static bool no_inherit = false;
static volatile pid_t child_pid = -1;
static bool null_run = false;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static bool big_num = true;
static int big_num_opt = -1;
static const char *csv_sep = NULL;
static bool csv_output = false;
static bool group = false;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int initial_delay = 0;
static unsigned int unit_width = 4; /* strlen("unit") */
static bool forever = false;
static bool metric_only = false;
static bool force_metric_only = false;
static bool no_merge = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
static aggr_get_id_t aggr_get_id;
static bool append_file;
static const char *output_name;
static int output_fd;
static int print_free_counters_hint;
struct perf_stat {
bool record;
struct perf_data_file file;
struct perf_session *session;
u64 bytes_written;
struct perf_tool tool;
bool maps_allocated;
struct cpu_map *cpus;
struct thread_map *threads;
enum aggr_mode aggr_mode;
};
static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record
static volatile int done = 0;
static struct perf_stat_config stat_config = {
.aggr_mode = AGGR_GLOBAL,
.scale = true,
};
static inline void diff_timespec(struct timespec *r, struct timespec *a,
struct timespec *b)
{
r->tv_sec = a->tv_sec - b->tv_sec;
if (a->tv_nsec < b->tv_nsec) {
r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
r->tv_sec--;
} else {
r->tv_nsec = a->tv_nsec - b->tv_nsec;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stéphane Eranian | 79 | 98.75% | 1 | 50.00% |
Arnaldo Carvalho de Melo | 1 | 1.25% | 1 | 50.00% |
Total | 80 | 100.00% | 2 | 100.00% |
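/*
 * Example (a standalone sketch, not part of builtin-stat.c): the
 * borrow handling in diff_timespec() above, exercised with two
 * hypothetical timestamps. When the minuend's nanosecond field is
 * smaller, one second is borrowed and added as NSEC_PER_SEC.
 */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L	/* mirrors <linux/time64.h> */

int main(void)
{
	struct timespec a = { .tv_sec = 5, .tv_nsec = 100000000 };
	struct timespec b = { .tv_sec = 3, .tv_nsec = 900000000 };
	struct timespec r;

	r.tv_sec = a.tv_sec - b.tv_sec;
	if (a.tv_nsec < b.tv_nsec) {
		r.tv_nsec = a.tv_nsec + NSEC_PER_SEC - b.tv_nsec;
		r.tv_sec--;			/* borrow one second */
	} else {
		r.tv_nsec = a.tv_nsec - b.tv_nsec;
	}
	printf("%ld.%09ld\n", (long)r.tv_sec, r.tv_nsec);	/* 1.200000000 */
	return 0;
}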
static void perf_stat__reset_stats(void)
{
perf_evlist__reset_stats(evsel_list);
perf_stat__reset_shadow_stats();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 16 | 100.00% | 2 | 100.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
struct perf_event_attr *attr = &evsel->attr;
struct perf_evsel *leader = evsel->leader;
if (stat_config.scale) {
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
}
/*
* The event is part of a non-trivial group. Enable the group
* read (for the leader) and ID retrieval for all members.
*/
if (leader->nr_members > 1)
attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
attr->inherit = !no_inherit;
/*
* Some events get initialized with sample_(period/type) set,
* like tracepoints. Clear it up for counting.
*/
attr->sample_period = 0;
/*
* But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be
* harmless and avoids confusing messages from older tools.
*
* However for pipe sessions we need to keep it zero,
* because script's perf_evsel__check_attr is triggered
* by attr->sample_type != 0, and we can't run it on
* stat sessions.
*/
if (!(STAT_RECORD && perf_stat.file.is_pipe))
attr->sample_type = PERF_SAMPLE_IDENTIFIER;
/*
* Disabling all counters initially, they will be enabled
* either manually by us or by kernel via enable_on_exec
* set later.
*/
if (perf_evsel__is_group_leader(evsel)) {
attr->disabled = 1;
/*
* In case of initial_delay we enable tracee
* events manually.
*/
if (target__none(&target) && !initial_delay)
attr->enable_on_exec = 1;
}
if (target__has_cpu(&target))
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 142 | 88.75% | 11 | 91.67% |
Andi Kleen | 18 | 11.25% | 1 | 8.33% |
Total | 160 | 100.00% | 12 | 100.00% |
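/*
 * Example (a standalone sketch, not part of this file): the same
 * attr setup as create_perf_stat_counter() above, expressed through
 * the raw perf_event_open(2) syscall instead of the evsel layer.
 * The read layout follows the documented read_format for a single
 * event with PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct { uint64_t val, ena, run; } rc;
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;
	attr.disabled = 1;		/* start stopped, as above */
	attr.inherit = 1;		/* i.e. !no_inherit */

	fd = perf_event_open(&attr, 0 /* self */, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &rc, sizeof(rc)) == (ssize_t)sizeof(rc))
		printf("%llu instructions (ena %llu, run %llu)\n",
		       (unsigned long long)rc.val,
		       (unsigned long long)rc.ena,
		       (unsigned long long)rc.run);
	close(fd);
	return 0;
}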
/*
* Does the counter have nsecs as a unit?
*/
static inline int nsec_counter(struct perf_evsel *evsel)
{
if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 26 | 68.42% | 1 | 50.00% |
Frederik Deweerdt | 12 | 31.58% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
perf_stat.bytes_written += event->header.size;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 73 | 100.00% | 2 | 100.00% |
Total | 73 | 100.00% | 2 | 100.00% |
static int write_stat_round_event(u64 tm, u64 type)
{
return perf_event__synthesize_stat_round(NULL, tm, type,
process_synthesized_event,
NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 26 | 100.00% | 2 | 100.00% |
Total | 26 | 100.00% | 2 | 100.00% |
#define WRITE_STAT_ROUND_EVENT(time, interval) \
write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
static int
perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
struct perf_counts_values *count)
{
struct perf_sample_id *sid = SID(counter, cpu, thread);
return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
process_synthesized_event, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 56 | 100.00% | 1 | 100.00% |
Total | 56 | 100.00% | 1 | 100.00% |
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
*/
static int read_counter(struct perf_evsel *counter)
{
int nthreads = thread_map__nr(evsel_list->threads);
int ncpus, cpu, thread;
if (target__has_cpu(&target))
ncpus = perf_evsel__nr_cpus(counter);
else
ncpus = 1;
if (!counter->supported)
return -ENOENT;
if (counter->system_wide)
nthreads = 1;
for (thread = 0; thread < nthreads; thread++) {
for (cpu = 0; cpu < ncpus; cpu++) {
struct perf_counts_values *count;
count = perf_counts(counter->counts, cpu, thread);
/*
* The leader's group read loads data into its group members
* (via perf_evsel__read_counter) and sets their count->loaded.
*/
if (!count->loaded &&
perf_evsel__read_counter(counter, cpu, thread)) {
counter->counts->scaled = -1;
perf_counts(counter->counts, cpu, thread)->ena = 0;
perf_counts(counter->counts, cpu, thread)->run = 0;
return -1;
}
count->loaded = false;
if (STAT_RECORD) {
if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
pr_err("failed to write stat event\n");
return -1;
}
}
if (verbose > 1) {
fprintf(stat_config.output,
"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
perf_evsel__name(counter),
cpu,
count->val, count->ena, count->run);
}
}
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 136 | 52.31% | 4 | 33.33% |
Andi Kleen | 42 | 16.15% | 1 | 8.33% |
Stéphane Eranian | 41 | 15.77% | 1 | 8.33% |
Mark Rutland | 21 | 8.08% | 1 | 8.33% |
Ingo Molnar | 20 | 7.69% | 5 | 41.67% |
Total | 260 | 100.00% | 12 | 100.00% |
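/*
 * Worked example (a sketch; the actual scaling lives in perf's
 * perf_counts_values__scale helper): when an event was multiplexed,
 * run < ena and the raw value is extrapolated by ena/run. The
 * numbers below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_count(uint64_t val, uint64_t ena, uint64_t run)
{
	if (!run)
		return 0;	/* never scheduled in: nothing to scale */
	if (run < ena)
		val = (uint64_t)((double)val * ena / run);
	return val;
}

int main(void)
{
	/* 1e6 events counted while scheduled in 50% of the time -> 2e6 */
	printf("%llu\n", (unsigned long long)
	       scale_count(1000000, 2000000000ULL, 1000000000ULL));
	return 0;
}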
static void read_counters(void)
{
struct perf_evsel *counter;
int ret;
evlist__for_each_entry(evsel_list, counter) {
ret = read_counter(counter);
if (ret)
pr_debug("failed to read counter %s\n", counter->name);
if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
pr_warning("failed to process counter %s\n", counter->name);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 46 | 68.66% | 6 | 42.86% |
Stéphane Eranian | 13 | 19.40% | 1 | 7.14% |
Ingo Molnar | 2 | 2.99% | 1 | 7.14% |
Namhyung Kim | 2 | 2.99% | 2 | 14.29% |
Arnaldo Carvalho de Melo | 1 | 1.49% | 1 | 7.14% |
Andi Kleen | 1 | 1.49% | 1 | 7.14% |
Peter Zijlstra | 1 | 1.49% | 1 | 7.14% |
Mark Rutland | 1 | 1.49% | 1 | 7.14% |
Total | 67 | 100.00% | 14 | 100.00% |
static void process_interval(void)
{
struct timespec ts, rs;
read_counters();
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
if (STAT_RECORD) {
if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
pr_err("failed to write stat round event\n");
}
print_counters(&rs, 0, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 57 | 76.00% | 5 | 50.00% |
Ingo Molnar | 16 | 21.33% | 3 | 30.00% |
Mark Rutland | 1 | 1.33% | 1 | 10.00% |
Arnaldo Carvalho de Melo | 1 | 1.33% | 1 | 10.00% |
Total | 75 | 100.00% | 10 | 100.00% |
static void enable_counters(void)
{
if (initial_delay)
usleep(initial_delay * USEC_PER_MSEC);
/*
* We need to enable counters only if:
* - we don't have a tracee (we are attaching to a task or cpu), or
* - we have an initial delay configured
*/
if (!target__none(&target) || initial_delay)
perf_evlist__enable(evsel_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 25 | 69.44% | 3 | 50.00% |
Ingo Molnar | 8 | 22.22% | 1 | 16.67% |
Namhyung Kim | 2 | 5.56% | 1 | 16.67% |
Arnaldo Carvalho de Melo | 1 | 2.78% | 1 | 16.67% |
Total | 36 | 100.00% | 6 | 100.00% |
static void disable_counters(void)
{
/*
* If we don't have a tracee (attaching to a task or cpu), counters may
* still be running. To get accurate group ratios, we must stop groups
* from counting before reading their constituent counters.
*/
if (!target__none(&target))
perf_evlist__disable(evsel_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Rutland | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static volatile int workload_exec_errno;
/*
* perf_evlist__prepare_workload will send a SIGUSR1
* if the fork fails, because we requested it by setting
* its want_signal argument to true.
*/
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
void *ucontext __maybe_unused)
{
workload_exec_errno = info->si_value.sival_int;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 20 | 74.07% | 1 | 20.00% |
Ingo Molnar | 4 | 14.81% | 1 | 20.00% |
Stéphane Eranian | 1 | 3.70% | 1 | 20.00% |
Arnaldo Carvalho de Melo | 1 | 3.70% | 1 | 20.00% |
Irina Tirdea | 1 | 3.70% | 1 | 20.00% |
Total | 27 | 100.00% | 5 | 100.00% |
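/*
 * Example (a standalone sketch of the POSIX mechanism used above):
 * sigqueue() carries an int payload that an SA_SIGINFO handler reads
 * back from info->si_value.sival_int; this is how the forked child's
 * errno reaches workload_exec_errno. The payload value is made up.
 */
#define _POSIX_C_SOURCE 199309L
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t reported_errno;

static void on_usr1(int signo, siginfo_t *info, void *ucontext)
{
	(void)signo; (void)ucontext;
	reported_errno = info->si_value.sival_int;
}

int main(void)
{
	union sigval val = { .sival_int = 2 };	/* pretend ENOENT */
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = on_usr1;
	act.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &act, NULL);

	sigqueue(getpid(), SIGUSR1, val);	/* self-signal for the demo */
	printf("got errno payload %d\n", (int)reported_errno);
	return 0;
}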
static bool has_unit(struct perf_evsel *counter)
{
return counter->unit && *counter->unit;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
static bool has_scale(struct perf_evsel *counter)
{
return counter->scale != 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
static int perf_stat_synthesize_config(bool is_pipe)
{
struct perf_evsel *counter;
int err;
if (is_pipe) {
err = perf_event__synthesize_attrs(NULL, perf_stat.session,
process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
return err;
}
}
/*
* Synthesize other event details not carried within the
* attr event: unit, scale, name.
*/
evlist__for_each_entry(evsel_list, counter) {
if (!counter->supported)
continue;
/*
* Synthesize unit and scale only if it's defined.
*/
if (has_unit(counter)) {
err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize evsel unit.\n");
return err;
}
}
if (has_scale(counter)) {
err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize evsel scale.\n");
return err;
}
}
if (counter->own_cpus) {
err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize evsel scale.\n");
return err;
}
}
/*
* Name is needed only for pipe output,
* perf.data carries event names.
*/
if (is_pipe) {
err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize evsel name.\n");
return err;
}
}
}
err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
process_synthesized_event,
NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;
}
err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;
}
err = perf_event__synthesize_stat_config(NULL, &stat_config,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize config.\n");
return err;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 305 | 99.67% | 3 | 75.00% |
Arnaldo Carvalho de Melo | 1 | 0.33% | 1 | 25.00% |
Total | 306 | 100.00% | 4 | 100.00% |
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
static int __store_counter_ids(struct perf_evsel *counter,
struct cpu_map *cpus,
struct thread_map *threads)
{
int cpu, thread;
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
int fd = FD(counter, cpu, thread);
if (perf_evlist__id_add_fd(evsel_list, counter,
cpu, thread, fd) < 0)
return -1;
}
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 96 | 100.00% | 1 | 100.00% |
Total | 96 | 100.00% | 1 | 100.00% |
static int store_counter_ids(struct perf_evsel *counter)
{
struct cpu_map *cpus = counter->cpus;
struct thread_map *threads = counter->threads;
if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
return -ENOMEM;
return __store_counter_ids(counter, cpus, threads);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 58 | 100.00% | 1 | 100.00% |
Total | 58 | 100.00% | 1 | 100.00% |
static bool perf_evsel__should_store_id(struct perf_evsel *counter)
{
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static int __run_perf_stat(int argc, const char **argv)
{
int interval = stat_config.interval;
char msg[BUFSIZ];
unsigned long long t0, t1;
struct perf_evsel *counter;
struct timespec ts;
size_t l;
int status = 0;
const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false;
struct perf_evsel_config_term *err_term;
if (interval) {
ts.tv_sec = interval / MSEC_PER_SEC;
ts.tv_nsec = (interval % MSEC_PER_SEC) * NSEC_PER_MSEC;
} else {
ts.tv_sec = 1;
ts.tv_nsec = 0;
}
if (forks) {
if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
workload_exec_failed_signal) < 0) {
perror("failed to prepare workload");
return -1;
}
child_pid = evsel_list->workload.pid;
}
if (group)
perf_evlist__set_leader(evsel_list);
evlist__for_each_entry(evsel_list, counter) {
try_again:
if (create_perf_stat_counter(counter) < 0) {
/*
* PPC returns ENXIO for HW counters until 2.6.37
* (behavior changed with commit b0a873e).
*/
if (errno == EINVAL || errno == ENOSYS ||
errno == ENOENT || errno == EOPNOTSUPP ||
errno == ENXIO) {
if (verbose > 0)
ui__warning("%s event is not supported by the kernel.\n",
perf_evsel__name(counter));
counter->supported = false;
if ((counter->leader != counter) ||
!(counter->leader->nr_members > 1))
continue;
} else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
perf_evsel__open_strerror(counter, &target,
errno, msg, sizeof(msg));
ui__error("%s\n", msg);
if (child_pid != -1)
kill(child_pid, SIGTERM);
return -1;
}
counter->supported = true;
l = strlen(counter->unit);
if (l > unit_width)
unit_width = l;
if (perf_evsel__should_store_id(counter) &&
store_counter_ids(counter))
return -1;
}
if (perf_evlist__apply_filters(evsel_list, &counter)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter, perf_evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
}
if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
}
if (STAT_RECORD) {
int err, fd = perf_data_file__fd(&perf_stat.file);
if (is_pipe) {
err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file));
} else {
err = perf_session__write_header(perf_stat.session, evsel_list,
fd, false);
}
if (err < 0)
return err;
err = perf_stat_synthesize_config(is_pipe);
if (err < 0)
return err;
}
/*
* Enable counters and exec the command:
*/
t0 = rdclock();
clock_gettime(CLOCK_MONOTONIC, &ref_time);
if (forks) {
perf_evlist__start_workload(evsel_list);
enable_counters();
if (interval) {
while (!waitpid(child_pid, &status, WNOHANG)) {
nanosleep(&ts, NULL);
process_interval();
}
}
waitpid(child_pid, &status, 0);
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
pr_err("Workload failed: %s\n", emsg);
return -1;
}
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
enable_counters();
while (!done) {
nanosleep(&ts, NULL);
if (interval)
process_interval();
}
}
disable_counters();
t1 = rdclock();
update_stats(&walltime_nsecs_stats, t1 - t0);
/*
* Closing a group leader splits the group, and as we only disable
* group leaders, results in remaining events becoming enabled. To
* avoid arbitrary skew, we must read all counters before closing any
* group leaders.
*/
read_counters();
perf_evlist__close(evsel_list);
return WEXITSTATUS(status);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 489 | 66.08% | 10 | 29.41% |
Ingo Molnar | 82 | 11.08% | 6 | 17.65% |
Mathieu J. Poirier | 53 | 7.16% | 1 | 2.94% |
Arnaldo Carvalho de Melo | 44 | 5.95% | 6 | 17.65% |
Kan Liang | 21 | 2.84% | 1 | 2.94% |
Namhyung Kim | 14 | 1.89% | 2 | 5.88% |
Andi Kleen | 12 | 1.62% | 2 | 5.88% |
Stéphane Eranian | 9 | 1.22% | 3 | 8.82% |
Mark Rutland | 8 | 1.08% | 1 | 2.94% |
Milian Wolff | 5 | 0.68% | 1 | 2.94% |
Jaswinder Singh Rajput | 3 | 0.41% | 1 | 2.94% |
Total | 740 | 100.00% | 34 | 100.00% |
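/*
 * Example (a standalone sketch of the forked-workload pattern in
 * __run_perf_stat() above): poll the child with waitpid(WNOHANG),
 * doing interval work between checks. The final blocking waitpid()
 * covers the non-interval path; it is a harmless no-op here because
 * the polling loop has already reaped the child. The "sleep 1"
 * workload is arbitrary.
 */
#include <stdio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100000000 };	/* 100ms */
	int status = 0;
	pid_t child = fork();

	if (child == 0) {
		execlp("sleep", "sleep", "1", (char *)NULL);
		_exit(127);			/* exec failed */
	}
	while (!waitpid(child, &status, WNOHANG)) {
		nanosleep(&ts, NULL);
		puts("interval tick");		/* stands in for process_interval() */
	}
	waitpid(child, &status, 0);
	return WEXITSTATUS(status);
}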
static int run_perf_stat(int argc, const char **argv)
{
int ret;
if (pre_cmd) {
ret = system(pre_cmd);
if (ret)
return ret;
}
if (sync_run)
sync();
ret = __run_perf_stat(argc, argv);
if (ret)
return ret;
if (post_cmd) {
ret = system(post_cmd);
if (ret)
return ret;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 56 | 66.67% | 1 | 16.67% |
Ingo Molnar | 14 | 16.67% | 2 | 33.33% |
Anton Blanchard | 7 | 8.33% | 1 | 16.67% |
Stéphane Eranian | 5 | 5.95% | 1 | 16.67% |
Lucas De Marchi | 2 | 2.38% | 1 | 16.67% |
Total | 84 | 100.00% | 6 | 100.00% |
static void print_running(u64 run, u64 ena)
{
if (csv_output) {
fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
csv_sep,
run,
csv_sep,
ena ? 100.0 * run / ena : 100.0);
} else if (run != ena) {
fprintf(stat_config.output, " (%.2f%%)", 100.0 * run / ena);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 49 | 71.01% | 3 | 75.00% |
Ingo Molnar | 20 | 28.99% | 1 | 25.00% |
Total | 69 | 100.00% | 4 | 100.00% |
static void print_noise_pct(double total, double avg)
{
double pct = rel_stddev_stats(total, avg);
if (csv_output)
fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
else if (pct