cregit-Linux how code gets into the kernel

Release 4.14 tools/perf/builtin-stat.c

Directory: tools/perf
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/group.h"
#include "util/string2.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sane_ctype.h"


/* Field separator used for normal (non-CSV) output. */
#define DEFAULT_SEPARATOR	" "

/* Placeholder printed when the kernel/PMU rejected the event. */
#define CNTR_NOT_SUPPORTED	"<not supported>"

/* Placeholder printed when the event was opened but never counted. */
#define CNTR_NOT_COUNTED	"<not counted>"

/* sysfs path (relative to the perf mount) toggling SMI-freeze for --smi-cost. */
#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"

/* Forward declaration: defined later, needed by interval printing. */
static void print_counters(struct timespec *ts, int argc, const char **argv);

/* Default events used for perf stat -T */

/*
 * Event string parsed for "perf stat -T" on CPUs with full TSX support.
 * Adjacent string literals concatenate into a single event-list spec.
 */
static const char *transaction_attrs =
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}";

/* More limited version when the CPU does not have all events. */

/*
 * Reduced -T event set for CPUs lacking the elision/checkpoint events
 * (cpu/el-start/, cpu/cycles-ct/).
 */
static const char *transaction_limited_attrs =
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}";


/*
 * Events requested for --topdown analysis, NULL-terminated so callers can
 * iterate without a length.  Order matters: it matches the top-down metric
 * formulas that consume these counts.
 */
static const char *topdown_attrs[] = {
	"topdown-total-slots",
	"topdown-slots-retired",
	"topdown-recovery-bubbles",
	"topdown-fetch-bubbles",
	"topdown-slots-issued",
	NULL,
};


/*
 * Event group parsed for --smi-cost: aperf/smi MSR reads plus cycles,
 * grouped so all three are measured over the same interval.
 */
static const char *smi_cost_attrs =
	"{"
	"msr/aperf/,"
	"msr/smi/,"
	"cycles"
	"}";


/* Event list built from the command line (or the default set). */
static struct perf_evlist	*evsel_list;

/* Measurement target (workload/pid/cpu); uid UINT_MAX means "no uid filter". */
static struct target target = {
	.uid	= UINT_MAX,
};

/* Maps a CPU to its aggregation id (socket, core, ...) for -a modes. */
typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);

static int			run_count			=  1;	/* -r: number of measurement repeats */

static bool			no_inherit			= false;	/* -i: do not count children */

static volatile pid_t		child_pid			= -1;	/* forked workload pid; -1 = none */

static bool			null_run			=  false;	/* -n: run workload with no events */

static int			detailed_run			=  0;	/* -d level (0..3) */

static bool			transaction_run;	/* -T: transaction statistics */

static bool			topdown_run			= false;	/* --topdown */

static bool			smi_cost			= false;	/* --smi-cost */

static bool			smi_reset			= false;	/* restore freeze_on_smi on exit */

static bool			big_num				=  true;	/* print thousands separators */

static int			big_num_opt			=  -1;	/* -B/--no-big-num tri-state; -1 = unset */

static const char		*csv_sep			= NULL;	/* -x separator; non-NULL enables CSV */

static bool			csv_output			= false;	/* derived from csv_sep */

static bool			group				= false;	/* -g: put events in a single group */

static const char		*pre_cmd			= NULL;	/* --pre: command run before measurement */

static const char		*post_cmd			= NULL;	/* --post: command run after measurement */

static bool			sync_run			= false;	/* -S: sync() before each run */

static unsigned int		initial_delay			= 0;	/* -D: ms to wait before enabling counters */

static unsigned int		unit_width			= 4;	/* column width for units; strlen("unit") */

static bool			forever				= false;	/* -r 0: repeat until interrupted */

static bool			metric_only			= false;	/* --metric-only */

static bool			force_metric_only		= false;	/* --metric-only given explicitly */

static bool			no_merge			= false;	/* --no-merge: don't merge uncore events */

/* Timestamp taken when counters were enabled; interval deltas are vs. this. */
static struct timespec		ref_time;

/* Aggregation map and lookup function for the selected --per-* mode. */
static struct cpu_map		*aggr_map;

static aggr_get_id_t		aggr_get_id;

static bool			append_file;	/* --append to the -o output file */

static const char		*output_name;	/* -o: output file name */

static int			output_fd;	/* --log-fd: write output to this fd */

static int			print_free_counters_hint;	/* suggest checking free counters on open failure */

/* State bundle for "perf stat record" / "perf stat report". */
struct perf_stat {
	bool			 record;	/* true when recording to perf.data */
	struct perf_data_file	 file;
	struct perf_session	*session;
	u64			 bytes_written;	/* payload bytes written so far */
	struct perf_tool	 tool;
	bool			 maps_allocated;	/* cpus/threads below are owned here */
	struct cpu_map		*cpus;
	struct thread_map	*threads;
	enum aggr_mode		 aggr_mode;
};

static struct perf_stat		perf_stat;

/* Shorthand: are we in "perf stat record" mode? */
#define STAT_RECORD		perf_stat.record

/* Set by the signal handler to end the counting loop. */
static volatile int done = 0;

/* Shared stat configuration; global aggregation and scaling by default. */
static struct perf_stat_config stat_config = {
	.aggr_mode	= AGGR_GLOBAL,
	.scale		= true,
};


/*
 * Store a - b into r.  Assumes a >= b; when a's nanosecond field is the
 * smaller one, borrow a full second from the seconds difference.
 */
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}

Contributors

Person                      Tokens   Prop      Commits   CommitProp
Stéphane Eranian            79       98.75%    1         50.00%
Arnaldo Carvalho de Melo    1        1.25%     1         50.00%
Total                       80       100.00%   2         100.00%


/* Clear per-event and shadow (derived-metric) stats between -r iterations. */
static void perf_stat__reset_stats(void)
{
	perf_evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}

Contributors

Person       Tokens   Prop      Commits   CommitProp
Jiri Olsa    16       100.00%   2         100.00%
Total        16       100.00%   2         100.00%


static int create_perf_stat_counter(struct perf_evsel *evsel) { struct perf_event_attr *attr = &evsel->attr; struct perf_evsel *leader = evsel->leader; if (stat_config.scale) { attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; } /* * The event is part of non trivial group, let's enable * the group read (for leader) and ID retrieval for all * members. */ if (leader->nr_members > 1) attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP; attr->inherit = !no_inherit; /* * Some events get initialized with sample_(period/type) set, * like tracepoints. Clear it up for counting. */ attr->sample_period = 0; /* * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless * while avoiding that older tools show confusing messages. * * However for pipe sessions we need to keep it zero, * because script's perf_evsel__check_attr is triggered * by attr->sample_type != 0, and we can't run it on * stat sessions. */ if (!(STAT_RECORD && perf_stat.file.is_pipe)) attr->sample_type = PERF_SAMPLE_IDENTIFIER; /* * Disabling all counters initially, they will be enabled * either manually by us or by kernel via enable_on_exec * set later. */ if (perf_evsel__is_group_leader(evsel)) { attr->disabled = 1; /* * In case of initial_delay we enable tracee * events manually. */ if (target__none(&target) && !initial_delay) attr->enable_on_exec = 1; } if (target__has_cpu(&target)) return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); return perf_evsel__open_per_thread(evsel, evsel_list->threads); }

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa14288.75%1191.67%
Andi Kleen1811.25%18.33%
Total160100.00%12100.00%

/* * Does the counter have nsecs as a unit? */
/* Does the counter have nsecs as a unit (cpu-clock or task-clock)? */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	/* || yields exactly 0 or 1, matching the original explicit returns. */
	return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	       perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa2668.42%150.00%
Frederik Deweerdt1231.58%150.00%
Total38100.00%2100.00%


/*
 * Sink for synthesized events during "perf stat record": append the event
 * to the output perf.data file and account the bytes written.
 */
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data_file__write(&perf_stat.file, event,
				  event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa73100.00%2100.00%
Total73100.00%2100.00%


/* Synthesize one STAT_ROUND event (time + round type) into the record file. */
static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa26100.00%2100.00%
Total26100.00%2100.00%

#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

/* Sample-id entry for (event, cpu, thread). */
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

/*
 * Synthesize a STAT event carrying one counter value, identified by the
 * sample id previously stored for this (cpu, thread) slot.
 */
static int perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu,
					u32 thread,
					struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu, thread);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa56100.00%1100.00%
Total56100.00%1100.00%

/* * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */
static int read_counter(struct perf_evsel *counter) { int nthreads = thread_map__nr(evsel_list->threads); int ncpus, cpu, thread; if (target__has_cpu(&target)) ncpus = perf_evsel__nr_cpus(counter); else ncpus = 1; if (!counter->supported) return -ENOENT; if (counter->system_wide) nthreads = 1; for (thread = 0; thread < nthreads; thread++) { for (cpu = 0; cpu < ncpus; cpu++) { struct perf_counts_values *count; count = perf_counts(counter->counts, cpu, thread); /* * The leader's group read loads data into its group members * (via perf_evsel__read_counter) and sets threir count->loaded. */ if (!count->loaded && perf_evsel__read_counter(counter, cpu, thread)) { counter->counts->scaled = -1; perf_counts(counter->counts, cpu, thread)->ena = 0; perf_counts(counter->counts, cpu, thread)->run = 0; return -1; } count->loaded = false; if (STAT_RECORD) { if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { pr_err("failed to write stat event\n"); return -1; } } if (verbose > 1) { fprintf(stat_config.output, "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", perf_evsel__name(counter), cpu, count->val, count->ena, count->run); } } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa13652.31%433.33%
Andi Kleen4216.15%18.33%
Stéphane Eranian4115.77%18.33%
Mark Rutland218.08%18.33%
Ingo Molnar207.69%541.67%
Total260100.00%12100.00%


/* Read every event in the list and feed the values into stat processing. */
static void read_counters(void)
{
	struct perf_evsel *counter;
	int ret;

	evlist__for_each_entry(evsel_list, counter) {
		ret = read_counter(counter);
		if (ret)
			pr_debug("failed to read counter %s\n", counter->name);

		if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa4668.66%642.86%
Stéphane Eranian1319.40%17.14%
Ingo Molnar22.99%17.14%
Namhyung Kim22.99%214.29%
Arnaldo Carvalho de Melo11.49%17.14%
Andi Kleen11.49%17.14%
Peter Zijlstra11.49%17.14%
Mark Rutland11.49%17.14%
Total67100.00%14100.00%


/*
 * One -I interval tick: read counters, compute elapsed time since
 * enable, optionally record a STAT_ROUND event, and print the interval.
 */
static void process_interval(void)
{
	struct timespec ts, rs;

	read_counters();

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	print_counters(&rs, 0, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa5776.00%550.00%
Ingo Molnar1621.33%330.00%
Mark Rutland11.33%110.00%
Arnaldo Carvalho de Melo11.33%110.00%
Total75100.00%10100.00%


/* Enable counting, honoring an optional -D startup delay. */
static void enable_counters(void)
{
	if (initial_delay)
		usleep(initial_delay * USEC_PER_MSEC);

	/*
	 * We need to enable counters only if:
	 * - we don't have tracee (attaching to task or cpu)
	 * - we have initial delay configured
	 * Otherwise enable_on_exec turns them on for us.
	 */
	if (!target__none(&target) || initial_delay)
		perf_evlist__enable(evsel_list);
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa2569.44%350.00%
Ingo Molnar822.22%116.67%
Namhyung Kim25.56%116.67%
Arnaldo Carvalho de Melo12.78%116.67%
Total36100.00%6100.00%


/*
 * If we don't have tracee (attaching to task or cpu), counters may
 * still be running. To get accurate group ratios, we must stop groups
 * from counting before reading their constituent counters.
 */
static void disable_counters(void)
{
	if (!target__none(&target))
		perf_evlist__disable(evsel_list);
}

Contributors

PersonTokensPropCommitsCommitProp
Mark Rutland23100.00%1100.00%
Total23100.00%1100.00%

/* errno from a failed workload exec, delivered via SIGUSR1's sigval. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa2074.07%120.00%
Ingo Molnar414.81%120.00%
Stéphane Eranian13.70%120.00%
Arnaldo Carvalho de Melo13.70%120.00%
Irina Tirdea13.70%120.00%
Total27100.00%5100.00%


static bool has_unit(struct perf_evsel *counter) { return counter->unit && *counter->unit; }

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa21100.00%1100.00%
Total21100.00%1100.00%


/* True when the event uses a non-default scaling factor. */
static bool has_scale(struct perf_evsel *counter)
{
	return counter->scale != 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa18100.00%1100.00%
Total18100.00%1100.00%


/*
 * Synthesize the metadata events a "perf stat record" session needs:
 * attrs (pipe mode only), per-event unit/scale/cpus/name updates, the
 * thread and cpu maps, and the stat config.  Returns 0 or a negative
 * error from the first failing synthesis step.
 *
 * Fixes two copy-pasted error messages: the own_cpus branch used to say
 * "evsel scale" and the cpu-map step used to say "thread map".
 */
static int perf_stat_synthesize_config(bool is_pipe)
{
	struct perf_evsel *counter;
	int err;

	if (is_pipe) {
		err = perf_event__synthesize_attrs(NULL, perf_stat.session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}
	}

	/*
	 * Synthesize other events stuff not carried within
	 * attr event - unit, scale, name.
	 */
	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported)
			continue;

		/* Synthesize unit and scale only if they're defined. */
		if (has_unit(counter)) {
			err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(counter)) {
			err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (counter->own_cpus) {
			err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}

	err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
						 process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_stat_config(NULL, &stat_config,
						 process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize config.\n");
		return err;
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa30599.67%375.00%
Arnaldo Carvalho de Melo10.33%125.00%
Total306100.00%4100.00%

/* File descriptor for event e on (cpu x, thread y). */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

/*
 * Register every (cpu, thread) fd of the counter with the evlist's
 * id hash so stat events can be matched back to the event later.
 */
static int __store_counter_ids(struct perf_evsel *counter,
			       struct cpu_map *cpus,
			       struct thread_map *threads)
{
	int cpu, thread;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd = FD(counter, cpu, thread);

			if (perf_evlist__id_add_fd(evsel_list, counter,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa96100.00%1100.00%
Total96100.00%1100.00%


/* Allocate the counter's id array, then store all per-cpu/thread ids. */
static int store_counter_ids(struct perf_evsel *counter)
{
	struct cpu_map *cpus = counter->cpus;
	struct thread_map *threads = counter->threads;

	if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
		return -ENOMEM;

	return __store_counter_ids(counter, cpus, threads);
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa58100.00%1100.00%
Total58100.00%1100.00%


/* Ids are needed when recording, or when group reads return them. */
static bool perf_evsel__should_store_id(struct perf_evsel *counter)
{
	return STAT_RECORD ||
	       (counter->attr.read_format & PERF_FORMAT_ID);
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa22100.00%1100.00%
Total22100.00%1100.00%


/*
 * Core measurement loop: prepare the (optional) forked workload, open all
 * counters with EINVAL/ENOSYS/ENOENT/EOPNOTSUPP/ENXIO fallback handling,
 * apply filters and driver configs, write the perf.data header when
 * recording, then enable counters, wait for the workload (or for SIGINT in
 * attach mode) while servicing -I intervals, and finally disable, read and
 * close everything.  Returns the workload's exit status or a negative error.
 */
static int __run_perf_stat(int argc, const char **argv) { int interval = stat_config.interval; char msg[BUFSIZ]; unsigned long long t0, t1; struct perf_evsel *counter; struct timespec ts; size_t l; int status = 0; const bool forks = (argc > 0); bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false; struct perf_evsel_config_term *err_term; if (interval) { ts.tv_sec = interval / USEC_PER_MSEC; ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC; } else { ts.tv_sec = 1; ts.tv_nsec = 0; } if (forks) { if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { perror("failed to prepare workload"); return -1; } child_pid = evsel_list->workload.pid; } if (group) perf_evlist__set_leader(evsel_list); evlist__for_each_entry(evsel_list, counter) { try_again: if (create_perf_stat_counter(counter) < 0) { /* * PPC returns ENXIO for HW counters until 2.6.37 * (behavior changed with commit b0a873e). */ if (errno == EINVAL || errno == ENOSYS || errno == ENOENT || errno == EOPNOTSUPP || errno == ENXIO) { if (verbose > 0) ui__warning("%s event is not supported by the kernel.\n", perf_evsel__name(counter)); counter->supported = false; if ((counter->leader != counter) || !(counter->leader->nr_members > 1)) continue; } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { if (verbose > 0) ui__warning("%s\n", msg); goto try_again; } perf_evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); ui__error("%s\n", msg); if (child_pid != -1) kill(child_pid, SIGTERM); return -1; } counter->supported = true; l = strlen(counter->unit); if (l > unit_width) unit_width = l; if (perf_evsel__should_store_id(counter) && store_counter_ids(counter)) return -1; } if (perf_evlist__apply_filters(evsel_list, &counter)) { pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", counter->filter, perf_evsel__name(counter), errno, str_error_r(errno, msg, sizeof(msg))); return -1; } if 
(perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) { pr_err("failed to set config \"%s\" on event %s with %d (%s)\n", err_term->val.drv_cfg, perf_evsel__name(counter), errno, str_error_r(errno, msg, sizeof(msg))); return -1; } if (STAT_RECORD) { int err, fd = perf_data_file__fd(&perf_stat.file); if (is_pipe) { err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file)); } else { err = perf_session__write_header(perf_stat.session, evsel_list, fd, false); } if (err < 0) return err; err = perf_stat_synthesize_config(is_pipe); if (err < 0) return err; } /* * Enable counters and exec the command: */ t0 = rdclock(); clock_gettime(CLOCK_MONOTONIC, &ref_time); if (forks) { perf_evlist__start_workload(evsel_list); enable_counters(); if (interval) { while (!waitpid(child_pid, &status, WNOHANG)) { nanosleep(&ts, NULL); process_interval(); } } waitpid(child_pid, &status, 0); if (workload_exec_errno) { const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); pr_err("Workload failed: %s\n", emsg); return -1; } if (WIFSIGNALED(status)) psignal(WTERMSIG(status), argv[0]); } else { enable_counters(); while (!done) { nanosleep(&ts, NULL); if (interval) process_interval(); } } disable_counters(); t1 = rdclock(); update_stats(&walltime_nsecs_stats, t1 - t0); /* * Closing a group leader splits the group, and as we only disable * group leaders, results in remaining events becoming enabled. To * avoid arbitrary skew, we must read all counters before closing any * group leaders. */ read_counters(); perf_evlist__close(evsel_list); return WEXITSTATUS(status); }

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa48966.08%1029.41%
Ingo Molnar8211.08%617.65%
Mathieu J. Poirier537.16%12.94%
Arnaldo Carvalho de Melo445.95%617.65%
Kan Liang212.84%12.94%
Namhyung Kim141.89%25.88%
Andi Kleen121.62%25.88%
Stéphane Eranian91.22%38.82%
Mark Rutland81.08%12.94%
Milian Wolff50.68%12.94%
Jaswinder Singh Rajput30.41%12.94%
Total740100.00%34100.00%


/*
 * Wrap __run_perf_stat with the optional --pre/--post shell commands and
 * the -S sync().  Any non-zero result short-circuits the remaining steps.
 */
static int run_perf_stat(int argc, const char **argv)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa5666.67%116.67%
Ingo Molnar1416.67%233.33%
Anton Blanchard78.33%116.67%
Stéphane Eranian55.95%116.67%
Lucas De Marchi22.38%116.67%
Total84100.00%6100.00%


/*
 * Print the running/enabled ratio: always as a CSV column in CSV mode,
 * otherwise only when the event was multiplexed (run != ena).
 */
static void print_running(u64 run, u64 ena)
{
	if (csv_output) {
		fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
			csv_sep, run, csv_sep,
			ena ? 100.0 * run / ena : 100.0);
	} else if (run != ena) {
		fprintf(stat_config.output, " (%.2f%%)", 100.0 * run / ena);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa4971.01%375.00%
Ingo Molnar2028.99%125.00%
Total69100.00%4100.00%


static void print_noise_pct(double total, double avg) { double pct = rel_stddev_stats(total, avg); if (csv_output) fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct); else if (pct