Release 4.14 tools/perf/builtin-record.c
// SPDX-License-Identifier: GPL-2.0
/*
* builtin-record.c
*
* Builtin record command: Record the profile of a workload
* (or a CPU, or a PID) into the perf.data output file - for
* later analysis via perf report.
*/
#include "builtin.h"
#include "perf.h"
#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"
#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "asm/bug.h"
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/time64.h>
struct switch_output {
bool enabled;
bool signal;
unsigned long size;
unsigned long time;
const char *str;
bool set;
};
struct record {
struct perf_tool tool;
struct record_opts opts;
u64 bytes_written;
struct perf_data_file file;
struct auxtrace_record *itr;
struct perf_evlist *evlist;
struct perf_session *session;
const char *progname;
int realtime_prio;
bool no_buildid;
bool no_buildid_set;
bool no_buildid_cache;
bool no_buildid_cache_set;
bool buildid_all;
bool timestamp_filename;
struct switch_output switch_output;
unsigned long long samples;
};
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
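/*
 * The switch_output_*() helpers below decide when "perf record
 * --switch-output" should rotate to a new perf.data file.  Rotation can
 * be requested by signal (SIGUSR2), once the bytes written so far exceed
 * a size threshold, or on a time interval; in every case the switch only
 * happens after switch_output_trigger has been armed (trigger_is_ready()).
 */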
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
trigger_is_ready(&switch_output_trigger);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static bool switch_output_size(struct record *rec)
{
return rec->switch_output.size &&
trigger_is_ready(&switch_output_trigger) &&
(rec->bytes_written >= rec->switch_output.size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
static bool switch_output_time(struct record *rec)
{
return rec->switch_output.time &&
trigger_is_ready(&switch_output_trigger);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Olsa | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
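/*
 * Every byte that ends up in perf.data - samples, synthesized events,
 * AUX trace data and padding alike - goes through record__write().
 * Besides performing the write it accounts rec->bytes_written, which is
 * what switch_output_size() compares against the --switch-output size
 * threshold.
 */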
static int record__write(struct record *rec, void *bf, size_t size)
{
if (perf_data_file__write(rec->session->file, bf, size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
rec->bytes_written += size;
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 24 | 35.29% | 1 | 12.50% |
Arnaldo Carvalho de Melo | 17 | 25.00% | 3 | 37.50% |
Jiri Olsa | 16 | 23.53% | 2 | 25.00% |
David Ahern | 10 | 14.71% | 1 | 12.50% |
Adrian Hunter | 1 | 1.47% | 1 | 12.50% |
Total | 68 | 100.00% | 8 | 100.00% |
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct record *rec = container_of(tool, struct record, tool);
return record__write(rec, event, event->header.size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnaldo Carvalho de Melo | 54 | 94.74% | 9 | 81.82% |
Irina Tirdea | 2 | 3.51% | 1 | 9.09% |
Jiri Olsa | 1 | 1.75% | 1 | 9.09% |
Total | 57 | 100.00% | 11 | 100.00% |
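/*
 * With an overwritable (backward) ring buffer the kernel writes events
 * from the head towards lower addresses, so the valid data is not simply
 * delimited by the usual old/head pair.  backward_rb_find_range() walks
 * the event headers starting at 'head' until it either wraps around the
 * whole buffer (mask + 1 bytes) or hits a zero-sized header, and reports
 * the region [*start, *end) that record__mmap_read() should copy out.
 */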
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
struct perf_event_header *pheader;
u64 evt_head = head;
int size = mask + 1;
pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
pheader = (struct perf_event_header *)(buf + (head & mask));
*start = head;
while (true) {
if (evt_head - head >= (unsigned int)size) {
pr_debug("Finished reading backward ring buffer: rewind\n");
if (evt_head - head > (unsigned int)size)
evt_head -= pheader->size;
*end = evt_head;
return 0;
}
pheader = (struct perf_event_header *)(buf + (evt_head & mask));
if (pheader->size == 0) {
pr_debug("Finished reading backward ring buffer: get start\n");
*end = evt_head;
return 0;
}
evt_head += pheader->size;
pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
}
WARN_ONCE(1, "Shouldn't get here\n");
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 189 | 98.95% | 1 | 50.00% |
Colin Ian King | 2 | 1.05% | 1 | 50.00% |
Total | 191 | 100.00% | 2 | 100.00% |
static int
rb_find_range(void *data, int mask, u64 head, u64 old,
u64 *start, u64 *end, bool backward)
{
if (!backward) {
*start = old;
*end = head;
return 0;
}
return backward_rb_find_range(data, mask, head, start, end);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 64 | 100.00% | 2 | 100.00% |
Total | 64 | 100.00% | 2 | 100.00% |
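/*
 * record__mmap_read() copies the region [start, end) of one mmap'ed ring
 * buffer into perf.data.  The buffer is a power-of-two ring of mask + 1
 * bytes, so a region crossing the end of the buffer is written in two
 * pieces.  For example, with a 64KiB ring (mask = 0xffff), start = 0xf000
 * and end = 0x11000, the first write covers the 0x1000 bytes up to the
 * end of the buffer and the second the 0x1000 bytes that wrapped around
 * to offset 0.  A region larger than the whole buffer means the tool fell
 * behind the kernel, so the data is dropped and a warning printed once.
 */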
static int
record__mmap_read(struct record *rec, struct perf_mmap *md,
bool overwrite, bool backward)
{
u64 head = perf_mmap__read_head(md);
u64 old = md->prev;
u64 end = head, start = old;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
int rc = 0;
if (rb_find_range(data, md->mask, head,
old, &start, &end, backward))
return -1;
if (start == end)
return 0;
rec->samples++;
size = end - start;
if (size > (unsigned long)(md->mask) + 1) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
return 0;
}
if ((start & md->mask) + size != (end & md->mask)) {
buf = &data[start & md->mask];
size = md->mask + 1 - (start & md->mask);
start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
}
buf = &data[start & md->mask];
size = end - start;
start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
md->prev = head;
perf_mmap__consume(md, overwrite || backward);
out:
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 108 | 35.76% | 5 | 31.25% |
Arnaldo Carvalho de Melo | 88 | 29.14% | 7 | 43.75% |
Ingo Molnar | 50 | 16.56% | 1 | 6.25% |
David Ahern | 44 | 14.57% | 2 | 12.50% |
Peter Zijlstra | 12 | 3.97% | 1 | 6.25% |
Total | 302 | 100.00% | 16 | 100.00% |
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
static void sig_handler(int sig)
{
if (sig == SIGCHLD)
child_finished = 1;
else
signr = sig;
done = 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static void sigsegv_handler(int sig)
{
perf_hooks__recover();
sighandler_dump_stack(sig);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static void record__sig_exit(void)
{
if (signr == -1)
return;
signal(signr, SIG_DFL);
raise(signr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
#ifdef HAVE_AUXTRACE_SUPPORT
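/*
 * AUX area data is written to perf.data as a PERF_RECORD_AUXTRACE event
 * followed by the raw trace bytes.  The ring buffer may hand the data
 * over as two segments (data1/len1 wrapping into data2/len2); both are
 * written back to back and then padded to an 8-byte boundary, which
 * event->auxtrace.size already accounts for.  E.g. len1 + len2 = 13 gives
 * (13 & 7) = 5, so 8 - 5 = 3 bytes of zero padding are appended.
 */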
static int record__process_auxtrace(struct perf_tool *tool,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
{
struct record *rec = container_of(tool, struct record, tool);
struct perf_data_file *file = &rec->file;
size_t padding;
u8 pad[8] = {0};
if (!perf_data_file__is_pipe(file)) {
off_t file_offset;
int fd = perf_data_file__fd(file);
int err;
file_offset = lseek(fd, 0, SEEK_CUR);
if (file_offset == -1)
return -1;
err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
event, file_offset);
if (err)
return err;
}
/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
padding = (len1 + len2) & 7;
if (padding)
padding = 8 - padding;
record__write(rec, event, event->header.size);
record__write(rec, data1, len1);
if (len2)
record__write(rec, data2, len2);
record__write(rec, &pad, padding);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 206 | 100.00% | 2 | 100.00% |
Total | 206 | 100.00% | 2 | 100.00% |
static int record__auxtrace_mmap_read(struct record *rec,
struct auxtrace_mmap *mm)
{
int ret;
ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
record__process_auxtrace);
if (ret < 0)
return ret;
if (ret)
rec->samples++;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 58 | 100.00% | 1 | 100.00% |
Total | 58 | 100.00% | 1 | 100.00% |
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
struct auxtrace_mmap *mm)
{
int ret;
ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
record__process_auxtrace,
rec->opts.auxtrace_snapshot_size);
if (ret < 0)
return ret;
if (ret)
rec->samples++;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
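/*
 * In AUX area snapshot mode (perf record -S/--snapshot) trace data is not
 * streamed continuously; the helpers below pull it out of every auxtrace
 * mmap only when a snapshot is requested, typically by sending SIGUSR2 to
 * perf record.
 */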
static int record__auxtrace_read_snapshot_all(struct record *rec)
{
int i;
int rc = 0;
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
struct auxtrace_mmap *mm =
&rec->evlist->mmap[i].auxtrace_mmap;
if (!mm->base)
continue;
if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
rc = -1;
goto out;
}
}
out:
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 89 | 100.00% | 1 | 100.00% |
Total | 89 | 100.00% | 1 | 100.00% |
static void record__read_auxtrace_snapshot(struct record *rec)
{
pr_debug("Recording AUX area tracing snapshot\n");
if (record__auxtrace_read_snapshot_all(rec) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
} else {
if (auxtrace_record__snapshot_finish(rec->itr))
trigger_error(&auxtrace_snapshot_trigger);
else
trigger_ready(&auxtrace_snapshot_trigger);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 39 | 67.24% | 1 | 50.00% |
Wang Nan | 19 | 32.76% | 1 | 50.00% |
Total | 58 | 100.00% | 2 | 100.00% |
#else
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
struct auxtrace_mmap *mm __maybe_unused)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 8 | 66.67% | 1 | 33.33% |
Peter Zijlstra | 3 | 25.00% | 1 | 33.33% |
Arnaldo Carvalho de Melo | 1 | 8.33% | 1 | 33.33% |
Total | 12 | 100.00% | 3 | 100.00% |
static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 11 | 68.75% | 1 | 33.33% |
Arnaldo Carvalho de Melo | 5 | 31.25% | 2 | 66.67% |
Total | 16 | 100.00% | 3 | 100.00% |
#endif
static int record__mmap_evlist(struct record *rec,
struct perf_evlist *evlist)
{
struct record_opts *opts = &rec->opts;
char msg[512];
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
"/proc/sys/kernel/perf_event_mlock_kb,\n"
"or try again with a smaller value of -m/--mmap_pages.\n"
"(current value: %u,%u)\n",
opts->mmap_pages, opts->auxtrace_mmap_pages);
return -errno;
} else {
pr_err("failed to mmap with %d (%s)\n", errno,
str_error_r(errno, msg, sizeof(msg)));
if (errno)
return -errno;
else
return -EINVAL;
}
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 123 | 99.19% | 1 | 50.00% |
Arnaldo Carvalho de Melo | 1 | 0.81% | 1 | 50.00% |
Total | 124 | 100.00% | 2 | 100.00% |
static int record__mmap(struct record *rec)
{
return record__mmap_evlist(rec, rec->evlist);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
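/*
 * record__open() creates the kernel events for every evsel in the evlist.
 * When perf_event_open() fails, perf_evsel__fallback() may rewrite the
 * event into something more likely to succeed (for instance falling back
 * from the hardware cycles event to the cpu-clock software event) and the
 * open is retried; only when no fallback is left is the error reported to
 * the user.  Once all events are open, filters and PMU driver configs are
 * applied and the ring buffers are mmap'ed.
 */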
static int record__open(struct record *rec)
{
char msg[BUFSIZ];
struct perf_evsel *pos;
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
struct perf_evsel_config_term *err_term;
int rc = 0;
perf_evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, pos) {
try_again:
if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
rc = -errno;
perf_evsel__open_strerror(pos, &opts->target,
errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out;
}
}
if (perf_evlist__apply_filters(evlist, &pos)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
}
if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
}
rc = record__mmap(rec);
if (rc)
goto out;
session->evlist = evlist;
perf_session__set_id_hdr_size(session);
out:
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnaldo Carvalho de Melo | 157 | 52.68% | 22 | 57.89% |
Mathieu J. Poirier | 57 | 19.13% | 1 | 2.63% |
David Ahern | 30 | 10.07% | 3 | 7.89% |
Frédéric Weisbecker | 18 | 6.04% | 1 | 2.63% |
Masami Hiramatsu | 7 | 2.35% | 1 | 2.63% |
Jiri Olsa | 6 | 2.01% | 1 | 2.63% |
Wang Nan | 6 | 2.01% | 2 | 5.26% |
Li Zefan | 5 | 1.68% | 1 | 2.63% |
Peter Zijlstra | 4 | 1.34% | 1 | 2.63% |
Kan Liang | 2 | 0.67% | 1 | 2.63% |
Namhyung Kim | 2 | 0.67% | 1 | 2.63% |
Ingo Molnar | 2 | 0.67% | 1 | 2.63% |
Stéphane Eranian | 1 | 0.34% | 1 | 2.63% |
Yanmin Zhang | 1 | 0.34% | 1 | 2.63% |
Total | 298 | 100.00% | 38 | 100.00% |
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct record *rec = container_of(tool, struct record, tool);
rec->samples++;
return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Namhyung Kim | 65 | 100.00% | 1 | 100.00% |
Total | 65 | 100.00% | 1 | 100.00% |
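/*
 * After recording finishes (and unless -B/--no-buildid was given), the
 * freshly written perf.data is processed once more so that the build-ids
 * of the DSOs actually hit by samples can be stored in the header and,
 * unless the cache is disabled, added to the build-id cache (~/.debug by
 * default).
 */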
static int process_buildids(struct record *rec)
{
struct perf_data_file *file = &rec->file;
struct perf_session *session = rec->session;
if (file->size == 0)
return 0;
/*
* During this pass the kernel map is loaded and dso->long_name is
* replaced with the real pathname that was found. In that case we
* prefer the vmlinux path, e.g.
* /lib/modules/3.16.4/build/vmlinux
*
* rather than the build-id path (in the debug directory), e.g.
* $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
*/
symbol_conf.ignore_vmlinux_buildid = true;
/*
* If --buildid-all is given, all DSOs are marked regardless of hits,
* so there is no need to process samples.
*/
if (rec->buildid_all)
rec->tool.sample = NULL;
return perf_session__process_events(session);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnaldo Carvalho de Melo | 25 | 36.23% | 4 | 44.44% |
Namhyung Kim | 23 | 33.33% | 3 | 33.33% |
Jiri Olsa | 19 | 27.54% | 1 | 11.11% |
He Kuang | 2 | 2.90% | 1 | 11.11% |
Total | 69 | 100.00% | 9 | 100.00% |
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
int err;
struct perf_tool *tool = data;
/*
* For guest kernels, when handling the record & report subcommands we
* synthesize the module mmaps before the guest kernel mmap and trigger a
* DSO preload, because guest module symbols are loaded from guest
* kallsyms by default instead of from /lib/modules/XXX/XXX. This avoids
* missing symbols when the first address falls in a module rather than
* in the guest kernel.
*/
err = perf_event__synthesize_modules(tool, process_synthesized_event,
machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
/*
* We use _stext for the guest kernel because the guest kernel's
* /proc/kallsyms sometimes has no _text.
*/
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yanmin Zhang | 55 | 67.90% | 1 | 20.00% |
Arnaldo Carvalho de Melo | 26 | 32.10% | 4 | 80.00% |
Total | 81 | 100.00% | 5 | 100.00% |
static struct perf_event_header finished_round_event = {
.size = sizeof(struct perf_event_header),
.type = PERF_RECORD_FINISHED_ROUND,
};
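/*
 * A PERF_RECORD_FINISHED_ROUND event is appended after each pass over the
 * ring buffers so that the consumer knows everything buffered before this
 * point can safely be sorted and flushed.  record__mmap_read_evlist()
 * drains either the regular mmaps or, for overwritable events, the
 * backward mmaps; the latter are only read while their state is
 * BKW_MMAP_DATA_PENDING and are marked BKW_MMAP_EMPTY once drained.
 */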
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
bool backward)
{
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
struct perf_mmap *maps;
if (!evlist)
return 0;
maps = backward ? evlist->backward_mmap : evlist->mmap;
if (!maps)
return 0;
if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
if (maps[i].base) {
if (record__mmap_read(rec, &maps[i],
evlist->overwrite, backward) != 0) {
rc = -1;
goto out;
}
}
if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
record__auxtrace_mmap_read(rec, mm) != 0) {
rc = -1;
goto out;
}
}
/*
* Mark the round finished in case we wrote
* at least one event.
*/
if (bytes_written != rec->bytes_written)
rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
if (backward)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 79 | 35.27% | 4 | 30.77% |
Adrian Hunter | 44 | 19.64% | 2 | 15.38% |
Frédéric Weisbecker | 43 | 19.20% | 1 | 7.69% |
David Ahern | 29 | 12.95% | 1 | 7.69% |
Jiri Olsa | 16 | 7.14% | 1 | 7.69% |
Arnaldo Carvalho de Melo | 13 | 5.80% | 4 | 30.77% |
Total | 224 | 100.00% | 13 | 100.00% |
static int record__mmap_read_all(struct record *rec)
{
int err;
err = record__mmap_read_evlist(rec, rec->evlist, false);
if (err)
return err;
return record__mmap_read_evlist(rec, rec->evlist, true);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 46 | 100.00% | 3 | 100.00% |
Total | 46 | 100.00% | 3 | 100.00% |
static void record__init_features(struct record *rec)
{
struct perf_session *session = rec->session;
int feat;
for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
perf_header__set_feat(&session->header, feat);
if (rec->no_buildid)
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
if (!have_tracepoints(&rec->evlist->entries))
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
if (!rec->opts.branch_stack)
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
if (!rec->opts.full_auxtrace)
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
perf_header__clear_feat(&session->header, HEADER_STAT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Ahern | 99 | 74.44% | 1 | 20.00% |
Adrian Hunter | 19 | 14.29% | 1 | 20.00% |
Jiri Olsa | 10 | 7.52% | 1 | 20.00% |
Arnaldo Carvalho de Melo | 5 | 3.76% | 2 | 40.00% |
Total | 133 | 100.00% | 5 | 100.00% |
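/*
 * record__finish_output() finalizes a file based (non-pipe) session: it
 * accounts the payload size, runs the build-id pass unless that was
 * disabled, and rewrites the perf.data header now that the final feature
 * data is known.
 */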
static void
record__finish_output(struct record *rec)
{
struct perf_data_file *file = &rec->file;
int fd = perf_data_file__fd(file);
if (file->is_pipe)
return;
rec->session->header.data_size += rec->bytes_written;
file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
if (!rec->no_buildid) {
process_buildids(rec);
if (rec->buildid_all)
dsos__hit_all(rec->session);
}
perf_session__write_header(rec->session, rec->evlist, fd, true);
return;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wang Nan | 107 | 100.00% | 1 | 100.00% |
Total | 107 | 100.00% | 1 | 100.00% |
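/*
 * When perf record forked the workload itself, COMM/MMAP events for that
 * thread are synthesized from /proc so its samples can be resolved.  With
 * --tail-synthesize (implied by --overwrite) this is done when finalizing
 * the output instead of at the start, so the synthesized state matches
 * the samples that survived in the overwritable ring buffer.
 */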
static int record__synthesize_workload(struct record *rec, bool tail)
{
int err;
struct thread_map *thread_map;
if (rec->opts.tail_synthesize != tail)
return 0;
thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
if (thread_map == NULL)
return -1;
err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
process_synthesized_event,
&rec->session->machines.host,
rec->opts.sample_address,
rec->opts.proc_map_timeout);
thread_map__put