cregit-Linux: how code gets into the kernel

Release 4.10, file tools/perf/builtin-stat.c

Directory: tools/perf
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/group.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <api/fs/fs.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <locale.h>
#include <math.h>


/* Column separator used for the default (non-CSV) output format. */
#define DEFAULT_SEPARATOR	" "

/* Placeholder printed when the kernel rejected the event at open time. */
#define CNTR_NOT_SUPPORTED	"<not supported>"

/* Placeholder printed when the event was opened but never got to count. */
#define CNTR_NOT_COUNTED	"<not counted>"

/* Forward declaration: the printer is defined later in this file. */
static void print_counters(struct timespec *ts, int argc, const char **argv);

/* Default events used for perf stat -T */

/*
 * Transaction-aware event set: task-clock plus one group combining
 * instructions, cycles and the TSX cycle/transaction-start counters.
 * NOTE(review): presumably handed to the event parser as a single
 * comma-separated spec — confirm against the -T setup code.
 */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};

/* More limited version when the CPU does not have all events. */

static const char * transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};


/* Events used when topdown mode is enabled (topdown_run); NULL-terminated. */
static const char * topdown_attrs[] = {
	"topdown-total-slots",
	"topdown-slots-retired",
	"topdown-recovery-bubbles",
	"topdown-fetch-bubbles",
	"topdown-slots-issued",
	NULL,
};


/* The list of events being counted, built from the command line. */
static struct perf_evlist	*evsel_list;


/* What to measure (pid/tid/cpu/uid selection); uid defaults to "unset". */
static struct target target = {
	.uid	= UINT_MAX,
};


/* Maps a cpu number to its aggregation id (socket, core, ...). */
typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);

/* Flags and values below are set from command-line option parsing. */

static int			run_count			=  1;

static bool			no_inherit			= false;

static volatile pid_t		child_pid			= -1;

static bool			null_run			=  false;

static int			detailed_run			=  0;

static bool			transaction_run;

static bool			topdown_run			= false;

static bool			big_num				=  true;

static int			big_num_opt			=  -1;

static const char		*csv_sep			= NULL;

static bool			csv_output			= false;

static bool			group				= false;

static const char		*pre_cmd			= NULL;

static const char		*post_cmd			= NULL;

static bool			sync_run			= false;

/* Milliseconds to wait before enabling counters (see enable_counters()). */
static unsigned int		initial_delay			= 0;

static unsigned int		unit_width			= 4; 
/* strlen("unit") */

static bool			forever				= false;

static bool			metric_only			= false;

static bool			force_metric_only		= false;

/* Timestamp taken right before counters start; interval deltas are vs this. */
static struct timespec		ref_time;

static struct cpu_map		*aggr_map;

static aggr_get_id_t		aggr_get_id;

static bool			append_file;

static const char		*output_name;

static int			output_fd;


/*
 * State for "perf stat record": whether recording is active, the output
 * perf.data file/session, byte accounting, and the maps that were
 * allocated for the session.  Field order is preserved from the original.
 */
struct perf_stat {
	bool			 record;
	struct perf_data_file	 file;
	struct perf_session	*session;
	u64			 bytes_written;
	struct perf_tool	 tool;
	bool			 maps_allocated;
	struct cpu_map		*cpus;
	struct thread_map	*threads;
	enum aggr_mode		 aggr_mode;
};


/* Global record-mode state; STAT_RECORD is true when writing perf.data. */
static struct perf_stat		perf_stat;

#define STAT_RECORD		perf_stat.record


/* Set asynchronously to terminate the counting loop in __run_perf_stat(). */
static volatile int done = 0;


/* Aggregation/scaling config, passed to perf_stat_process_counter(). */
static struct perf_stat_config stat_config = {
	.aggr_mode	= AGGR_GLOBAL,
	.scale		= true,
};


static inline void diff_timespec(struct timespec *r, struct timespec *a, struct timespec *b) { r->tv_sec = a->tv_sec - b->tv_sec; if (a->tv_nsec < b->tv_nsec) { r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; r->tv_sec--; } else { r->tv_nsec = a->tv_nsec - b->tv_nsec ; } }

Contributors

Person                      Tokens  Prop     Commits  CommitProp
stephane eranian            79      98.75%   1        50.00%
arnaldo carvalho de melo    1       1.25%    1        50.00%
Total                       80      100.00%  2        100.00%


static void perf_stat__reset_stats(void) { perf_evlist__reset_stats(evsel_list); perf_stat__reset_shadow_stats(); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa16100.00%2100.00%
Total16100.00%2100.00%


static int create_perf_stat_counter(struct perf_evsel *evsel) { struct perf_event_attr *attr = &evsel->attr; if (stat_config.scale) attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; attr->inherit = !no_inherit; /* * Some events get initialized with sample_(period/type) set, * like tracepoints. Clear it up for counting. */ attr->sample_period = 0; /* * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless * while avoiding that older tools show confusing messages. * * However for pipe sessions we need to keep it zero, * because script's perf_evsel__check_attr is triggered * by attr->sample_type != 0, and we can't run it on * stat sessions. */ if (!(STAT_RECORD && perf_stat.file.is_pipe)) attr->sample_type = PERF_SAMPLE_IDENTIFIER; /* * Disabling all counters initially, they will be enabled * either manually by us or by kernel via enable_on_exec * set later. */ if (perf_evsel__is_group_leader(evsel)) { attr->disabled = 1; /* * In case of initial_delay we enable tracee * events manually. */ if (target__none(&target) && !initial_delay) attr->enable_on_exec = 1; } if (target__has_cpu(&target)) return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); return perf_evsel__open_per_thread(evsel, evsel_list->threads); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa10478.79%1168.75%
andi kleenandi kleen1511.36%16.25%
arnaldo carvalho de meloarnaldo carvalho de melo75.30%212.50%
ingo molnaringo molnar53.79%16.25%
paul mackerraspaul mackerras10.76%16.25%
Total132100.00%16100.00%

/*
 * Does the counter count time in nanoseconds, i.e. is it one of the
 * software cpu-clock / task-clock events?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	       perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa1950.00%150.00%
peter zijlstrapeter zijlstra1950.00%150.00%
Total38100.00%2100.00%


static int process_synthesized_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) { pr_err("failed to write perf data, error: %m\n"); return -1; } perf_stat.bytes_written += event->header.size; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa73100.00%2100.00%
Total73100.00%2100.00%


static int write_stat_round_event(u64 tm, u64 type) { return perf_event__synthesize_stat_round(NULL, tm, type, process_synthesized_event, NULL); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa26100.00%2100.00%
Total26100.00%2100.00%

#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

/*
 * Emit one per-(cpu, thread) counter value into the record stream,
 * tagged with the event's sample id.
 */
static int perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu,
					u32 thread,
					struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu, thread);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa56100.00%1100.00%
Total56100.00%1100.00%

/* * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */
static int read_counter(struct perf_evsel *counter) { int nthreads = thread_map__nr(evsel_list->threads); int ncpus, cpu, thread; if (target__has_cpu(&target)) ncpus = perf_evsel__nr_cpus(counter); else ncpus = 1; if (!counter->supported) return -ENOENT; if (counter->system_wide) nthreads = 1; for (thread = 0; thread < nthreads; thread++) { for (cpu = 0; cpu < ncpus; cpu++) { struct perf_counts_values *count; count = perf_counts(counter->counts, cpu, thread); if (perf_evsel__read(counter, cpu, thread, count)) return -1; if (STAT_RECORD) { if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { pr_err("failed to write stat event\n"); return -1; } } if (verbose > 1) { fprintf(stat_config.output, "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", perf_evsel__name(counter), cpu, count->val, count->ena, count->run); } } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa12861.24%350.00%
andi kleenandi kleen4220.10%116.67%
mark rutlandmark rutland2110.05%116.67%
namhyung kimnamhyung kim188.61%116.67%
Total209100.00%6100.00%


static void read_counters(void) { struct perf_evsel *counter; evlist__for_each_entry(evsel_list, counter) { if (read_counter(counter)) pr_debug("failed to read counter %s\n", counter->name); if (perf_stat_process_counter(&stat_config, counter)) pr_warning("failed to process counter %s\n", counter->name); } }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa4580.36%550.00%
ingo molnaringo molnar814.29%220.00%
mark rutlandmark rutland11.79%110.00%
arnaldo carvalho de meloarnaldo carvalho de melo11.79%110.00%
andi kleenandi kleen11.79%110.00%
Total56100.00%10100.00%


static void process_interval(void) { struct timespec ts, rs; read_counters(); clock_gettime(CLOCK_MONOTONIC, &ts); diff_timespec(&rs, &ts, &ref_time); if (STAT_RECORD) { if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) pr_err("failed to write stat round event\n"); } print_counters(&rs, 0, NULL); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa5978.67%550.00%
ingo molnaringo molnar1418.67%330.00%
mark rutlandmark rutland11.33%110.00%
arnaldo carvalho de meloarnaldo carvalho de melo11.33%110.00%
Total75100.00%10100.00%


static void enable_counters(void) { if (initial_delay) usleep(initial_delay * USEC_PER_MSEC); /* * We need to enable counters only if: * - we don't have tracee (attaching to task or cpu) * - we have initial delay configured */ if (!target__none(&target) || initial_delay) perf_evlist__enable(evsel_list); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa2569.44%350.00%
ingo molnaringo molnar822.22%116.67%
namhyung kimnamhyung kim25.56%116.67%
arnaldo carvalho de meloarnaldo carvalho de melo12.78%116.67%
Total36100.00%6100.00%


static void disable_counters(void) { /* * If we don't have tracee (attaching to task or cpu), counters may * still be running. To get accurate group ratios, we must stop groups * from counting before reading their constituent counters. */ if (!target__none(&target)) perf_evlist__disable(evsel_list); }

Contributors

PersonTokensPropCommitsCommitProp
mark rutlandmark rutland23100.00%1100.00%
Total23100.00%1100.00%

static volatile int workload_exec_errno;

/*
 * SIGUSR1 handler: perf_evlist__prepare_workload sends SIGUSR1 when the
 * fork fails (we asked for it by setting want_signal), carrying the
 * errno in the signal's sival_int payload.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa2074.07%120.00%
ingo molnaringo molnar414.81%120.00%
arnaldo carvalho de meloarnaldo carvalho de melo13.70%120.00%
irina tirdeairina tirdea13.70%120.00%
stephane eranianstephane eranian13.70%120.00%
Total27100.00%5100.00%


static bool has_unit(struct perf_evsel *counter) { return counter->unit && *counter->unit; }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa21100.00%1100.00%
Total21100.00%1100.00%


static bool has_scale(struct perf_evsel *counter) { return counter->scale != 1; }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa18100.00%1100.00%
Total18100.00%1100.00%


static int perf_stat_synthesize_config(bool is_pipe) { struct perf_evsel *counter; int err; if (is_pipe) { err = perf_event__synthesize_attrs(NULL, perf_stat.session, process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize attrs.\n"); return err; } } /* * Synthesize other events stuff not carried within * attr event - unit, scale, name */ evlist__for_each_entry(evsel_list, counter) { if (!counter->supported) continue; /* * Synthesize unit and scale only if it's defined. */ if (has_unit(counter)) { err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize evsel unit.\n"); return err; } } if (has_scale(counter)) { err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize evsel scale.\n"); return err; } } if (counter->own_cpus) { err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize evsel scale.\n"); return err; } } /* * Name is needed only for pipe output, * perf.data carries event names. */ if (is_pipe) { err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize evsel name.\n"); return err; } } } err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads, process_synthesized_event, NULL); if (err < 0) { pr_err("Couldn't synthesize thread map.\n"); return err; } err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus, process_synthesized_event, NULL); if (err < 0) { pr_err("Couldn't synthesize thread map.\n"); return err; } err = perf_event__synthesize_stat_config(NULL, &stat_config, process_synthesized_event, NULL); if (err < 0) { pr_err("Couldn't synthesize config.\n"); return err; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa30599.67%375.00%
arnaldo carvalho de meloarnaldo carvalho de melo10.33%125.00%
Total306100.00%4100.00%

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

/*
 * Register every (cpu, thread) fd of the counter with the evlist's id
 * hash so its records can be resolved back to the event later.
 * Returns 0 on success, -1 if any fd fails to register.
 */
static int __store_counter_ids(struct perf_evsel *counter,
			       struct cpu_map *cpus,
			       struct thread_map *threads)
{
	int cpu, thread;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd = FD(counter, cpu, thread);

			if (perf_evlist__id_add_fd(evsel_list, counter,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa96100.00%1100.00%
Total96100.00%1100.00%


static int store_counter_ids(struct perf_evsel *counter) { struct cpu_map *cpus = counter->cpus; struct thread_map *threads = counter->threads; if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr)) return -ENOMEM; return __store_counter_ids(counter, cpus, threads); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa58100.00%1100.00%
Total58100.00%1100.00%


/*
 * __run_perf_stat - the core measurement routine: open and configure all
 * counters (falling back or skipping events the kernel rejects with
 * EINVAL/ENOSYS/ENOENT/EOPNOTSUPP/ENXIO), apply filters and driver
 * configs, write the perf.data header/metadata when STAT_RECORD, then
 * start the forked workload (or just enable counters when attaching),
 * tick process_interval() while waiting, and finally disable, read and
 * close all counters.
 *
 * Returns the workload's exit status (0 when only attaching), or a
 * negative value on any setup, record or workload failure.
 */
static int __run_perf_stat(int argc, const char **argv) { int interval = stat_config.interval; char msg[512]; unsigned long long t0, t1; struct perf_evsel *counter; struct timespec ts; size_t l; int status = 0; const bool forks = (argc > 0); bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false; struct perf_evsel_config_term *err_term; if (interval) { ts.tv_sec = interval / USEC_PER_MSEC; ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC; } else { ts.tv_sec = 1; ts.tv_nsec = 0; } if (forks) { if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { perror("failed to prepare workload"); return -1; } child_pid = evsel_list->workload.pid; } if (group) perf_evlist__set_leader(evsel_list); evlist__for_each_entry(evsel_list, counter) { try_again: if (create_perf_stat_counter(counter) < 0) { /* * PPC returns ENXIO for HW counters until 2.6.37 * (behavior changed with commit b0a873e). */ if (errno == EINVAL || errno == ENOSYS || errno == ENOENT || errno == EOPNOTSUPP || errno == ENXIO) { if (verbose) ui__warning("%s event is not supported by the kernel.\n", perf_evsel__name(counter)); counter->supported = false; if ((counter->leader != counter) || !(counter->leader->nr_members > 1)) continue; } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { if (verbose) ui__warning("%s\n", msg); goto try_again; } perf_evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); ui__error("%s\n", msg); if (child_pid != -1) kill(child_pid, SIGTERM); return -1; } counter->supported = true; l = strlen(counter->unit); if (l > unit_width) unit_width = l; if (STAT_RECORD && store_counter_ids(counter)) return -1; } if (perf_evlist__apply_filters(evsel_list, &counter)) { error("failed to set filter \"%s\" on event %s with %d (%s)\n", counter->filter, perf_evsel__name(counter), errno, str_error_r(errno, msg, sizeof(msg))); return -1; } if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) { 
error("failed to set config \"%s\" on event %s with %d (%s)\n", err_term->val.drv_cfg, perf_evsel__name(counter), errno, str_error_r(errno, msg, sizeof(msg))); return -1; } if (STAT_RECORD) { int err, fd = perf_data_file__fd(&perf_stat.file); if (is_pipe) { err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file)); } else { err = perf_session__write_header(perf_stat.session, evsel_list, fd, false); } if (err < 0) return err; err = perf_stat_synthesize_config(is_pipe); if (err < 0) return err; } /* * Enable counters and exec the command: */ t0 = rdclock(); clock_gettime(CLOCK_MONOTONIC, &ref_time); if (forks) { perf_evlist__start_workload(evsel_list); enable_counters(); if (interval) { while (!waitpid(child_pid, &status, WNOHANG)) { nanosleep(&ts, NULL); process_interval(); } } wait(&status); if (workload_exec_errno) { const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); pr_err("Workload failed: %s\n", emsg); return -1; } if (WIFSIGNALED(status)) psignal(WTERMSIG(status), argv[0]); } else { enable_counters(); while (!done) { nanosleep(&ts, NULL); if (interval) process_interval(); } } disable_counters(); t1 = rdclock(); update_stats(&walltime_nsecs_stats, t1 - t0); /* * Closing a group leader splits the group, and as we only disable * group leaders, results in remaining events becoming enabled. To * avoid arbitrary skew, we must read all counters before closing any * group leaders. */ read_counters(); perf_evlist__close(evsel_list); return WEXITSTATUS(status); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa49267.49%931.03%
ingo molnaringo molnar8211.25%620.69%
mathieu j. poiriermathieu j. poirier547.41%13.45%
arnaldo carvalho de meloarnaldo carvalho de melo415.62%413.79%
kan liangkan liang212.88%13.45%
andi kleenandi kleen121.65%26.90%
stephane eranianstephane eranian91.23%310.34%
mark rutlandmark rutland81.10%13.45%
namhyung kimnamhyung kim70.96%13.45%
jaswinder singh rajputjaswinder singh rajput30.41%13.45%
Total729100.00%29100.00%


static int run_perf_stat(int argc, const char **argv) { int ret; if (pre_cmd) { ret = system(pre_cmd); if (ret) return ret; } if (sync_run) sync(); ret = __run_perf_stat(argc, argv); if (ret) return ret; if (post_cmd) { ret = system(post_cmd); if (ret) return ret; } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa5666.67%116.67%
ingo molnaringo molnar1416.67%233.33%
anton blanchardanton blanchard78.33%116.67%
stephane eranianstephane eranian55.95%116.67%
lucas de marchilucas de marchi22.38%116.67%
Total84100.00%6100.00%


static void print_running(u64 run, u64 ena) { if (csv_output) { fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f", csv_sep, run, csv_sep, ena ? 100.0 * run / ena : 100.0); } else if (run != ena) { fprintf(stat_config.output, " (%.2f%%)", 100.0 * run / ena); } }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa4971.01%375.00%
ingo molnaringo molnar2028.99%125.00%
Total69100.00%4100.00%


static void print_noise_pct(double total, double avg) { double pct = rel_stddev_stats(total, avg); if (csv_output) fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct); else if (pct) fprintf(stat_config.output, " ( +-%6.2f%% )", pct); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa3563.64%360.00%
andi kleenandi kleen1221.82%120.00%
ingo molnaringo molnar814.55%120.00%
Total55100.00%5100.00%


static void print_noise(struct perf_evsel *evsel, double avg) { struct perf_stat_evsel *ps; if (run_count == 1) return; ps = evsel->priv; print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); }

Contributors

PersonTokensPropCommitsCommitProp
jiri olsajiri olsa4185.42%250.00%
andi kleenandi kleen714.58%250.00%
Total48100.00%4100.00%


static void aggr_printout(struct perf_evsel *evsel, int id, int nr) { switch (stat_config.aggr_mode) { case AGGR_CORE: fprintf(stat_config.output, "S%d-C%*d%s%*d%s", cpu_map__id_to_socket(id)