Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Stanislav Fomichev | 5079 | 53.34% | 17 | 15.60% |
Arjan van de Ven | 2541 | 26.69% | 10 | 9.17% |
Arnaldo Carvalho de Melo | 850 | 8.93% | 38 | 34.86% |
Jiri Olsa | 318 | 3.34% | 6 | 5.50% |
Ian Rogers | 239 | 2.51% | 6 | 5.50% |
Thomas Renninger | 134 | 1.41% | 2 | 1.83% |
Shang XiaoJing | 120 | 1.26% | 2 | 1.83% |
Ingo Molnar | 98 | 1.03% | 2 | 1.83% |
Yunlong Song | 36 | 0.38% | 2 | 1.83% |
Irina Tirdea | 19 | 0.20% | 2 | 1.83% |
OGAWA Hirofumi | 13 | 0.14% | 2 | 1.83% |
Li Wei | 12 | 0.13% | 1 | 0.92% |
David Ahern | 11 | 0.12% | 1 | 0.92% |
Li Zefan | 10 | 0.11% | 1 | 0.92% |
Namhyung Kim | 7 | 0.07% | 3 | 2.75% |
Mamatha Inamdar | 7 | 0.07% | 1 | 0.92% |
Stéphane Eranian | 5 | 0.05% | 1 | 0.92% |
He Kuang | 4 | 0.04% | 1 | 0.92% |
Chris Samuel | 3 | 0.03% | 1 | 0.92% |
Andi Kleen | 3 | 0.03% | 1 | 0.92% |
Jason Baron | 3 | 0.03% | 1 | 0.92% |
Martin Kepplinger | 2 | 0.02% | 1 | 0.92% |
Thomas Gleixner | 2 | 0.02% | 1 | 0.92% |
Ramkumar Ramachandra | 1 | 0.01% | 1 | 0.92% |
Josh Poimboeuf | 1 | 0.01% | 1 | 0.92% |
Gustavo A. R. Silva | 1 | 0.01% | 1 | 0.92% |
Yicong Yang | 1 | 0.01% | 1 | 0.92% |
Kyle Meyer | 1 | 0.01% | 1 | 0.92% |
Ian Munsie | 1 | 0.01% | 1 | 0.92% |
Total | 9522 | 100.00% | 109 | 100.00% |
// SPDX-License-Identifier: GPL-2.0-only /* * builtin-timechart.c - make an svg timechart of system activity * * (C) Copyright 2009 Intel Corporation * * Authors: * Arjan van de Ven <arjan@linux.intel.com> */ #include <errno.h> #include <inttypes.h> #include "builtin.h" #include "util/color.h" #include <linux/list.h> #include "util/evlist.h" // for struct evsel_str_handler #include "util/evsel.h" #include <linux/kernel.h> #include <linux/rbtree.h> #include <linux/time64.h> #include <linux/zalloc.h> #include "util/symbol.h" #include "util/thread.h" #include "util/callchain.h" #include "util/header.h" #include <subcmd/pager.h> #include <subcmd/parse-options.h> #include "util/parse-events.h" #include "util/event.h" #include "util/session.h" #include "util/svghelper.h" #include "util/tool.h" #include "util/data.h" #include "util/debug.h" #include "util/string2.h" #include "util/tracepoint.h" #include "util/util.h" #include <linux/err.h> #include <event-parse.h> #ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE FILE *open_memstream(char **ptr, size_t *sizeloc); #endif #define SUPPORT_OLD_POWER_EVENTS 1 #define PWR_EVENT_EXIT -1 struct per_pid; struct power_event; struct wake_event; struct timechart { struct perf_tool tool; struct per_pid *all_data; struct power_event *power_events; struct wake_event *wake_events; int proc_num; unsigned int numcpus; u64 min_freq, /* Lowest CPU frequency seen */ max_freq, /* Highest CPU frequency seen */ turbo_frequency, first_time, last_time; bool power_only, tasks_only, with_backtrace, topology; bool force; /* IO related settings */ bool io_only, skip_eagain; u64 io_events; u64 min_time, merge_dist; }; struct per_pidcomm; struct cpu_sample; struct io_sample; /* * Datastructure layout: * We keep an list of "pid"s, matching the kernels notion of a task struct. * Each "pid" entry, has a list of "comm"s. * this is because we want to track different programs different, while * exec will reuse the original pid (by design). * Each comm has a list of samples that will be used to draw * final graph. 
*/ struct per_pid { struct per_pid *next; int pid; int ppid; u64 start_time; u64 end_time; u64 total_time; u64 total_bytes; int display; struct per_pidcomm *all; struct per_pidcomm *current; }; struct per_pidcomm { struct per_pidcomm *next; u64 start_time; u64 end_time; u64 total_time; u64 max_bytes; u64 total_bytes; int Y; int display; long state; u64 state_since; char *comm; struct cpu_sample *samples; struct io_sample *io_samples; }; struct sample_wrapper { struct sample_wrapper *next; u64 timestamp; unsigned char data[]; }; #define TYPE_NONE 0 #define TYPE_RUNNING 1 #define TYPE_WAITING 2 #define TYPE_BLOCKED 3 struct cpu_sample { struct cpu_sample *next; u64 start_time; u64 end_time; int type; int cpu; const char *backtrace; }; enum { IOTYPE_READ, IOTYPE_WRITE, IOTYPE_SYNC, IOTYPE_TX, IOTYPE_RX, IOTYPE_POLL, }; struct io_sample { struct io_sample *next; u64 start_time; u64 end_time; u64 bytes; int type; int fd; int err; int merges; }; #define CSTATE 1 #define PSTATE 2 struct power_event { struct power_event *next; int type; int state; u64 start_time; u64 end_time; int cpu; }; struct wake_event { struct wake_event *next; int waker; int wakee; u64 time; const char *backtrace; }; struct process_filter { char *name; int pid; struct process_filter *next; }; static struct process_filter *process_filter; static struct per_pid *find_create_pid(struct timechart *tchart, int pid) { struct per_pid *cursor = tchart->all_data; while (cursor) { if (cursor->pid == pid) return cursor; cursor = cursor->next; } cursor = zalloc(sizeof(*cursor)); assert(cursor != NULL); cursor->pid = pid; cursor->next = tchart->all_data; tchart->all_data = cursor; return cursor; } static struct per_pidcomm *create_pidcomm(struct per_pid *p) { struct per_pidcomm *c; c = zalloc(sizeof(*c)); if (!c) return NULL; p->current = c; c->next = p->all; p->all = c; return c; } static void pid_set_comm(struct timechart *tchart, int pid, char *comm) { struct per_pid *p; struct per_pidcomm *c; p = find_create_pid(tchart, pid); c = p->all; while (c) { if (c->comm && strcmp(c->comm, comm) == 0) { p->current = c; return; } if (!c->comm) { c->comm = strdup(comm); p->current = c; return; } c = c->next; } c = create_pidcomm(p); assert(c != NULL); c->comm = strdup(comm); } static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp) { struct per_pid *p, *pp; p = find_create_pid(tchart, pid); pp = find_create_pid(tchart, ppid); p->ppid = ppid; if (pp->current && pp->current->comm && !p->current) pid_set_comm(tchart, pid, pp->current->comm); p->start_time = timestamp; if (p->current && !p->current->start_time) { p->current->start_time = timestamp; p->current->state_since = timestamp; } } static void pid_exit(struct timechart *tchart, int pid, u64 timestamp) { struct per_pid *p; p = find_create_pid(tchart, pid); p->end_time = timestamp; if (p->current) p->current->end_time = timestamp; } static void pid_put_sample(struct timechart *tchart, int pid, int type, unsigned int cpu, u64 start, u64 end, const char *backtrace) { struct per_pid *p; struct per_pidcomm *c; struct cpu_sample *sample; p = find_create_pid(tchart, pid); c = p->current; if (!c) { c = create_pidcomm(p); assert(c != NULL); } sample = zalloc(sizeof(*sample)); assert(sample != NULL); sample->start_time = start; sample->end_time = end; sample->type = type; sample->next = c->samples; sample->cpu = cpu; sample->backtrace = backtrace; c->samples = sample; if (sample->type == TYPE_RUNNING && end > start && start > 0) { c->total_time += (end-start); p->total_time += 
(end-start); } if (c->start_time == 0 || c->start_time > start) c->start_time = start; if (p->start_time == 0 || p->start_time > start) p->start_time = start; } #define MAX_CPUS 4096 static u64 *cpus_cstate_start_times; static int *cpus_cstate_state; static u64 *cpus_pstate_start_times; static u64 *cpus_pstate_state; static int process_comm_event(const struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { struct timechart *tchart = container_of(tool, struct timechart, tool); pid_set_comm(tchart, event->comm.tid, event->comm.comm); return 0; } static int process_fork_event(const struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { struct timechart *tchart = container_of(tool, struct timechart, tool); pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time); return 0; } static int process_exit_event(const struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { struct timechart *tchart = container_of(tool, struct timechart, tool); pid_exit(tchart, event->fork.pid, event->fork.time); return 0; } #ifdef SUPPORT_OLD_POWER_EVENTS static int use_old_power_events; #endif static void c_state_start(int cpu, u64 timestamp, int state) { cpus_cstate_start_times[cpu] = timestamp; cpus_cstate_state[cpu] = state; } static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp) { struct power_event *pwr = zalloc(sizeof(*pwr)); if (!pwr) return; pwr->state = cpus_cstate_state[cpu]; pwr->start_time = cpus_cstate_start_times[cpu]; pwr->end_time = timestamp; pwr->cpu = cpu; pwr->type = CSTATE; pwr->next = tchart->power_events; tchart->power_events = pwr; } static struct power_event *p_state_end(struct timechart *tchart, int cpu, u64 timestamp) { struct power_event *pwr = zalloc(sizeof(*pwr)); if (!pwr) return NULL; pwr->state = cpus_pstate_state[cpu]; pwr->start_time = cpus_pstate_start_times[cpu]; pwr->end_time = timestamp; pwr->cpu = cpu; pwr->type = PSTATE; pwr->next = tchart->power_events; if (!pwr->start_time) pwr->start_time = tchart->first_time; tchart->power_events = pwr; return pwr; } static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq) { struct power_event *pwr; if (new_freq > 8000000) /* detect invalid data */ return; pwr = p_state_end(tchart, cpu, timestamp); if (!pwr) return; cpus_pstate_state[cpu] = new_freq; cpus_pstate_start_times[cpu] = timestamp; if ((u64)new_freq > tchart->max_freq) tchart->max_freq = new_freq; if (new_freq < tchart->min_freq || tchart->min_freq == 0) tchart->min_freq = new_freq; if (new_freq == tchart->max_freq - 1000) tchart->turbo_frequency = tchart->max_freq; } static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp, int waker, int wakee, u8 flags, const char *backtrace) { struct per_pid *p; struct wake_event *we = zalloc(sizeof(*we)); if (!we) return; we->time = timestamp; we->waker = waker; we->backtrace = backtrace; if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ)) we->waker = -1; we->wakee = wakee; we->next = tchart->wake_events; tchart->wake_events = we; p = find_create_pid(tchart, we->wakee); if (p && p->current && p->current->state == TYPE_NONE) { p->current->state_since = timestamp; p->current->state = TYPE_WAITING; } if (p && p->current && p->current->state == TYPE_BLOCKED) { pid_put_sample(tchart, p->pid, p->current->state, cpu, 
p->current->state_since, timestamp, NULL); p->current->state_since = timestamp; p->current->state = TYPE_WAITING; } } static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp, int prev_pid, int next_pid, u64 prev_state, const char *backtrace) { struct per_pid *p = NULL, *prev_p; prev_p = find_create_pid(tchart, prev_pid); p = find_create_pid(tchart, next_pid); if (prev_p->current && prev_p->current->state != TYPE_NONE) pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp, backtrace); if (p && p->current) { if (p->current->state != TYPE_NONE) pid_put_sample(tchart, next_pid, p->current->state, cpu, p->current->state_since, timestamp, backtrace); p->current->state_since = timestamp; p->current->state = TYPE_RUNNING; } if (prev_p->current) { prev_p->current->state = TYPE_NONE; prev_p->current->state_since = timestamp; if (prev_state & 2) prev_p->current->state = TYPE_BLOCKED; if (prev_state == 0) prev_p->current->state = TYPE_WAITING; } } static const char *cat_backtrace(union perf_event *event, struct perf_sample *sample, struct machine *machine) { struct addr_location al; unsigned int i; char *p = NULL; size_t p_len; u8 cpumode = PERF_RECORD_MISC_USER; struct ip_callchain *chain = sample->callchain; FILE *f = open_memstream(&p, &p_len); if (!f) { perror("open_memstream error"); return NULL; } addr_location__init(&al); if (!chain) goto exit; if (machine__resolve(machine, &al, sample) < 0) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); goto exit; } for (i = 0; i < chain->nr; i++) { u64 ip; struct addr_location tal; if (callchain_param.order == ORDER_CALLEE) ip = chain->ips[i]; else ip = chain->ips[chain->nr - i - 1]; if (ip >= PERF_CONTEXT_MAX) { switch (ip) { case PERF_CONTEXT_HV: cpumode = PERF_RECORD_MISC_HYPERVISOR; break; case PERF_CONTEXT_KERNEL: cpumode = PERF_RECORD_MISC_KERNEL; break; case PERF_CONTEXT_USER: cpumode = PERF_RECORD_MISC_USER; break; default: pr_debug("invalid callchain context: " "%"PRId64"\n", (s64) ip); /* * It seems the callchain is corrupted. * Discard all. */ zfree(&p); goto exit; } continue; } addr_location__init(&tal); tal.filtered = 0; if (thread__find_symbol(al.thread, cpumode, ip, &tal)) fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name); else fprintf(f, "..... 
%016" PRIx64 "\n", ip); addr_location__exit(&tal); } exit: addr_location__exit(&al); fclose(f); return p; } typedef int (*tracepoint_handler)(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample, const char *backtrace); static int process_sample_event(const struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct evsel *evsel, struct machine *machine) { struct timechart *tchart = container_of(tool, struct timechart, tool); if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) { if (!tchart->first_time || tchart->first_time > sample->time) tchart->first_time = sample->time; if (tchart->last_time < sample->time) tchart->last_time = sample->time; } if (evsel->handler != NULL) { tracepoint_handler f = evsel->handler; return f(tchart, evsel, sample, cat_backtrace(event, sample, machine)); } return 0; } static int process_sample_cpu_idle(struct timechart *tchart __maybe_unused, struct evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) { u32 state = evsel__intval(evsel, sample, "state"); u32 cpu_id = evsel__intval(evsel, sample, "cpu_id"); if (state == (u32)PWR_EVENT_EXIT) c_state_end(tchart, cpu_id, sample->time); else c_state_start(cpu_id, sample->time, state); return 0; } static int process_sample_cpu_frequency(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) { u32 state = evsel__intval(evsel, sample, "state"); u32 cpu_id = evsel__intval(evsel, sample, "cpu_id"); p_state_change(tchart, cpu_id, sample->time, state); return 0; } static int process_sample_sched_wakeup(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample, const char *backtrace) { u8 flags = evsel__intval(evsel, sample, "common_flags"); int waker = evsel__intval(evsel, sample, "common_pid"); int wakee = evsel__intval(evsel, sample, "pid"); sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace); return 0; } static int process_sample_sched_switch(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample, const char *backtrace) { int prev_pid = evsel__intval(evsel, sample, "prev_pid"); int next_pid = evsel__intval(evsel, sample, "next_pid"); u64 prev_state = evsel__intval(evsel, sample, "prev_state"); sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid, prev_state, backtrace); return 0; } #ifdef SUPPORT_OLD_POWER_EVENTS static int process_sample_power_start(struct timechart *tchart __maybe_unused, struct evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) { u64 cpu_id = evsel__intval(evsel, sample, "cpu_id"); u64 value = evsel__intval(evsel, sample, "value"); c_state_start(cpu_id, sample->time, value); return 0; } static int process_sample_power_end(struct timechart *tchart, struct evsel *evsel __maybe_unused, struct perf_sample *sample, const char *backtrace __maybe_unused) { c_state_end(tchart, sample->cpu, sample->time); return 0; } static int process_sample_power_frequency(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample, const char *backtrace __maybe_unused) { u64 cpu_id = evsel__intval(evsel, sample, "cpu_id"); u64 value = evsel__intval(evsel, sample, "value"); p_state_change(tchart, cpu_id, sample->time, value); return 0; } #endif /* SUPPORT_OLD_POWER_EVENTS */ /* * After the last sample we need to wrap up the current C/P state * and close out each CPU for these. 
*/ static void end_sample_processing(struct timechart *tchart) { u64 cpu; struct power_event *pwr; for (cpu = 0; cpu <= tchart->numcpus; cpu++) { /* C state */ #if 0 pwr = zalloc(sizeof(*pwr)); if (!pwr) return; pwr->state = cpus_cstate_state[cpu]; pwr->start_time = cpus_cstate_start_times[cpu]; pwr->end_time = tchart->last_time; pwr->cpu = cpu; pwr->type = CSTATE; pwr->next = tchart->power_events; tchart->power_events = pwr; #endif /* P state */ pwr = p_state_end(tchart, cpu, tchart->last_time); if (!pwr) return; if (!pwr->state) pwr->state = tchart->min_freq; } } static int pid_begin_io_sample(struct timechart *tchart, int pid, int type, u64 start, int fd) { struct per_pid *p = find_create_pid(tchart, pid); struct per_pidcomm *c = p->current; struct io_sample *sample; struct io_sample *prev; if (!c) { c = create_pidcomm(p); if (!c) return -ENOMEM; } prev = c->io_samples; if (prev && prev->start_time && !prev->end_time) { pr_warning("Skip invalid start event: " "previous event already started!\n"); /* remove previous event that has been started, * we are not sure we will ever get an end for it */ c->io_samples = prev->next; free(prev); return 0; } sample = zalloc(sizeof(*sample)); if (!sample) return -ENOMEM; sample->start_time = start; sample->type = type; sample->fd = fd; sample->next = c->io_samples; c->io_samples = sample; if (c->start_time == 0 || c->start_time > start) c->start_time = start; return 0; } static int pid_end_io_sample(struct timechart *tchart, int pid, int type, u64 end, long ret) { struct per_pid *p = find_create_pid(tchart, pid); struct per_pidcomm *c = p->current; struct io_sample *sample, *prev; if (!c) { pr_warning("Invalid pidcomm!\n"); return -1; } sample = c->io_samples; if (!sample) /* skip partially captured events */ return 0; if (sample->end_time) { pr_warning("Skip invalid end event: " "previous event already ended!\n"); return 0; } if (sample->type != type) { pr_warning("Skip invalid end event: invalid event type!\n"); return 0; } sample->end_time = end; prev = sample->next; /* we want to be able to see small and fast transfers, so make them * at least min_time long, but don't overlap them */ if (sample->end_time - sample->start_time < tchart->min_time) sample->end_time = sample->start_time + tchart->min_time; if (prev && sample->start_time < prev->end_time) { if (prev->err) /* try to make errors more visible */ sample->start_time = prev->end_time; else prev->end_time = sample->start_time; } if (ret < 0) { sample->err = ret; } else if (type == IOTYPE_READ || type == IOTYPE_WRITE || type == IOTYPE_TX || type == IOTYPE_RX) { if ((u64)ret > c->max_bytes) c->max_bytes = ret; c->total_bytes += ret; p->total_bytes += ret; sample->bytes = ret; } /* merge two requests to make svg smaller and render-friendly */ if (prev && prev->type == sample->type && prev->err == sample->err && prev->fd == sample->fd && prev->end_time + tchart->merge_dist >= sample->start_time) { sample->bytes += prev->bytes; sample->merges += prev->merges + 1; sample->start_time = prev->start_time; sample->next = prev->next; free(prev); if (!sample->err && sample->bytes > c->max_bytes) c->max_bytes = sample->bytes; } tchart->io_events++; return 0; } static int process_enter_read(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long fd = evsel__intval(evsel, sample, "fd"); return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ, sample->time, fd); } static int process_exit_read(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long ret = 
evsel__intval(evsel, sample, "ret"); return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ, sample->time, ret); } static int process_enter_write(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long fd = evsel__intval(evsel, sample, "fd"); return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE, sample->time, fd); } static int process_exit_write(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long ret = evsel__intval(evsel, sample, "ret"); return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE, sample->time, ret); } static int process_enter_sync(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long fd = evsel__intval(evsel, sample, "fd"); return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC, sample->time, fd); } static int process_exit_sync(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long ret = evsel__intval(evsel, sample, "ret"); return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC, sample->time, ret); } static int process_enter_tx(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long fd = evsel__intval(evsel, sample, "fd"); return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX, sample->time, fd); } static int process_exit_tx(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long ret = evsel__intval(evsel, sample, "ret"); return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX, sample->time, ret); } static int process_enter_rx(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long fd = evsel__intval(evsel, sample, "fd"); return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX, sample->time, fd); } static int process_exit_rx(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long ret = evsel__intval(evsel, sample, "ret"); return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX, sample->time, ret); } static int process_enter_poll(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long fd = evsel__intval(evsel, sample, "fd"); return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL, sample->time, fd); } static int process_exit_poll(struct timechart *tchart, struct evsel *evsel, struct perf_sample *sample) { long ret = evsel__intval(evsel, sample, "ret"); return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL, sample->time, ret); } /* * Sort the pid datastructure */ static void sort_pids(struct timechart *tchart) { struct per_pid *new_list, *p, *cursor, *prev; /* sort by ppid first, then by pid, lowest to highest */ new_list = NULL; while (tchart->all_data) { p = tchart->all_data; tchart->all_data = p->next; p->next = NULL; if (new_list == NULL) { new_list = p; p->next = NULL; continue; } prev = NULL; cursor = new_list; while (cursor) { if (cursor->ppid > p->ppid || (cursor->ppid == p->ppid && cursor->pid > p->pid)) { /* must insert before */ if (prev) { p->next = prev->next; prev->next = p; cursor = NULL; continue; } else { p->next = new_list; new_list = p; cursor = NULL; continue; } } prev = cursor; cursor = cursor->next; if (!cursor) prev->next = p; } } tchart->all_data = new_list; } static void draw_c_p_states(struct timechart *tchart) { struct power_event *pwr; pwr = tchart->power_events; /* * two pass drawing so that the P state bars are on top of the C state blocks */ while (pwr) { if (pwr->type == CSTATE) svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); pwr = pwr->next; } pwr = 
tchart->power_events; while (pwr) { if (pwr->type == PSTATE) { if (!pwr->state) pwr->state = tchart->min_freq; svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); } pwr = pwr->next; } } static void draw_wakeups(struct timechart *tchart) { struct wake_event *we; struct per_pid *p; struct per_pidcomm *c; we = tchart->wake_events; while (we) { int from = 0, to = 0; char *task_from = NULL, *task_to = NULL; /* locate the column of the waker and wakee */ p = tchart->all_data; while (p) { if (p->pid == we->waker || p->pid == we->wakee) { c = p->all; while (c) { if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { if (p->pid == we->waker && !from) { from = c->Y; task_from = strdup(c->comm); } if (p->pid == we->wakee && !to) { to = c->Y; task_to = strdup(c->comm); } } c = c->next; } c = p->all; while (c) { if (p->pid == we->waker && !from) { from = c->Y; task_from = strdup(c->comm); } if (p->pid == we->wakee && !to) { to = c->Y; task_to = strdup(c->comm); } c = c->next; } } p = p->next; } if (!task_from) { task_from = malloc(40); sprintf(task_from, "[%i]", we->waker); } if (!task_to) { task_to = malloc(40); sprintf(task_to, "[%i]", we->wakee); } if (we->waker == -1) svg_interrupt(we->time, to, we->backtrace); else if (from && to && abs(from - to) == 1) svg_wakeline(we->time, from, to, we->backtrace); else svg_partial_wakeline(we->time, from, task_from, to, task_to, we->backtrace); we = we->next; free(task_from); free(task_to); } } static void draw_cpu_usage(struct timechart *tchart) { struct per_pid *p; struct per_pidcomm *c; struct cpu_sample *sample; p = tchart->all_data; while (p) { c = p->all; while (c) { sample = c->samples; while (sample) { if (sample->type == TYPE_RUNNING) { svg_process(sample->cpu, sample->start_time, sample->end_time, p->pid, c->comm, sample->backtrace); } sample = sample->next; } c = c->next; } p = p->next; } } static void draw_io_bars(struct timechart *tchart) { const char *suf; double bytes; char comm[256]; struct per_pid *p; struct per_pidcomm *c; struct io_sample *sample; int Y = 1; p = tchart->all_data; while (p) { c = p->all; while (c) { if (!c->display) { c->Y = 0; c = c->next; continue; } svg_box(Y, c->start_time, c->end_time, "process3"); for (sample = c->io_samples; sample; sample = sample->next) { double h = (double)sample->bytes / c->max_bytes; if (tchart->skip_eagain && sample->err == -EAGAIN) continue; if (sample->err) h = 1; if (sample->type == IOTYPE_SYNC) svg_fbox(Y, sample->start_time, sample->end_time, 1, sample->err ? "error" : "sync", sample->fd, sample->err, sample->merges); else if (sample->type == IOTYPE_POLL) svg_fbox(Y, sample->start_time, sample->end_time, 1, sample->err ? "error" : "poll", sample->fd, sample->err, sample->merges); else if (sample->type == IOTYPE_READ) svg_ubox(Y, sample->start_time, sample->end_time, h, sample->err ? "error" : "disk", sample->fd, sample->err, sample->merges); else if (sample->type == IOTYPE_WRITE) svg_lbox(Y, sample->start_time, sample->end_time, h, sample->err ? "error" : "disk", sample->fd, sample->err, sample->merges); else if (sample->type == IOTYPE_RX) svg_ubox(Y, sample->start_time, sample->end_time, h, sample->err ? "error" : "net", sample->fd, sample->err, sample->merges); else if (sample->type == IOTYPE_TX) svg_lbox(Y, sample->start_time, sample->end_time, h, sample->err ? 
"error" : "net", sample->fd, sample->err, sample->merges); } suf = ""; bytes = c->total_bytes; if (bytes > 1024) { bytes = bytes / 1024; suf = "K"; } if (bytes > 1024) { bytes = bytes / 1024; suf = "M"; } if (bytes > 1024) { bytes = bytes / 1024; suf = "G"; } sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf); svg_text(Y, c->start_time, comm); c->Y = Y; Y++; c = c->next; } p = p->next; } } static void draw_process_bars(struct timechart *tchart) { struct per_pid *p; struct per_pidcomm *c; struct cpu_sample *sample; int Y = 0; Y = 2 * tchart->numcpus + 2; p = tchart->all_data; while (p) { c = p->all; while (c) { if (!c->display) { c->Y = 0; c = c->next; continue; } svg_box(Y, c->start_time, c->end_time, "process"); sample = c->samples; while (sample) { if (sample->type == TYPE_RUNNING) svg_running(Y, sample->cpu, sample->start_time, sample->end_time, sample->backtrace); if (sample->type == TYPE_BLOCKED) svg_blocked(Y, sample->cpu, sample->start_time, sample->end_time, sample->backtrace); if (sample->type == TYPE_WAITING) svg_waiting(Y, sample->cpu, sample->start_time, sample->end_time, sample->backtrace); sample = sample->next; } if (c->comm) { char comm[256]; if (c->total_time > 5000000000) /* 5 seconds */ sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC); else sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC); svg_text(Y, c->start_time, comm); } c->Y = Y; Y++; c = c->next; } p = p->next; } } static void add_process_filter(const char *string) { int pid = strtoull(string, NULL, 10); struct process_filter *filt = malloc(sizeof(*filt)); if (!filt) return; filt->name = strdup(string); filt->pid = pid; filt->next = process_filter; process_filter = filt; } static int passes_filter(struct per_pid *p, struct per_pidcomm *c) { struct process_filter *filt; if (!process_filter) return 1; filt = process_filter; while (filt) { if (filt->pid && p->pid == filt->pid) return 1; if (strcmp(filt->name, c->comm) == 0) return 1; filt = filt->next; } return 0; } static int determine_display_tasks_filtered(struct timechart *tchart) { struct per_pid *p; struct per_pidcomm *c; int count = 0; p = tchart->all_data; while (p) { p->display = 0; if (p->start_time == 1) p->start_time = tchart->first_time; /* no exit marker, task kept running to the end */ if (p->end_time == 0) p->end_time = tchart->last_time; c = p->all; while (c) { c->display = 0; if (c->start_time == 1) c->start_time = tchart->first_time; if (passes_filter(p, c)) { c->display = 1; p->display = 1; count++; } if (c->end_time == 0) c->end_time = tchart->last_time; c = c->next; } p = p->next; } return count; } static int determine_display_tasks(struct timechart *tchart, u64 threshold) { struct per_pid *p; struct per_pidcomm *c; int count = 0; p = tchart->all_data; while (p) { p->display = 0; if (p->start_time == 1) p->start_time = tchart->first_time; /* no exit marker, task kept running to the end */ if (p->end_time == 0) p->end_time = tchart->last_time; if (p->total_time >= threshold) p->display = 1; c = p->all; while (c) { c->display = 0; if (c->start_time == 1) c->start_time = tchart->first_time; if (c->total_time >= threshold) { c->display = 1; count++; } if (c->end_time == 0) c->end_time = tchart->last_time; c = c->next; } p = p->next; } return count; } static int determine_display_io_tasks(struct timechart *timechart, u64 threshold) { struct per_pid *p; struct per_pidcomm *c; int count = 0; p = timechart->all_data; while (p) { /* no exit marker, task 
kept running to the end */ if (p->end_time == 0) p->end_time = timechart->last_time; c = p->all; while (c) { c->display = 0; if (c->total_bytes >= threshold) { c->display = 1; count++; } if (c->end_time == 0) c->end_time = timechart->last_time; c = c->next; } p = p->next; } return count; } #define BYTES_THRESH (1 * 1024 * 1024) #define TIME_THRESH 10000000 static void write_svg_file(struct timechart *tchart, const char *filename) { u64 i; int count; int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH; if (tchart->power_only) tchart->proc_num = 0; /* We'd like to show at least proc_num tasks; * be less picky if we have fewer */ do { if (process_filter) count = determine_display_tasks_filtered(tchart); else if (tchart->io_events) count = determine_display_io_tasks(tchart, thresh); else count = determine_display_tasks(tchart, thresh); thresh /= 10; } while (!process_filter && thresh && count < tchart->proc_num); if (!tchart->proc_num) count = 0; if (tchart->io_events) { open_svg(filename, 0, count, tchart->first_time, tchart->last_time); svg_time_grid(0.5); svg_io_legenda(); draw_io_bars(tchart); } else { open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time); svg_time_grid(0); svg_legenda(); for (i = 0; i < tchart->numcpus; i++) svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency); draw_cpu_usage(tchart); if (tchart->proc_num) draw_process_bars(tchart); if (!tchart->tasks_only) draw_c_p_states(tchart); if (tchart->proc_num) draw_wakeups(tchart); } svg_close(); } static int process_header(struct perf_file_section *section __maybe_unused, struct perf_header *ph, int feat, int fd __maybe_unused, void *data) { struct timechart *tchart = data; switch (feat) { case HEADER_NRCPUS: tchart->numcpus = ph->env.nr_cpus_avail; break; case HEADER_CPU_TOPOLOGY: if (!tchart->topology) break; if (svg_build_topology_map(&ph->env)) fprintf(stderr, "problem building topology\n"); break; default: break; } return 0; } static int __cmd_timechart(struct timechart *tchart, const char *output_name) { const struct evsel_str_handler power_tracepoints[] = { { "power:cpu_idle", process_sample_cpu_idle }, { "power:cpu_frequency", process_sample_cpu_frequency }, { "sched:sched_wakeup", process_sample_sched_wakeup }, { "sched:sched_switch", process_sample_sched_switch }, #ifdef SUPPORT_OLD_POWER_EVENTS { "power:power_start", process_sample_power_start }, { "power:power_end", process_sample_power_end }, { "power:power_frequency", process_sample_power_frequency }, #endif { "syscalls:sys_enter_read", process_enter_read }, { "syscalls:sys_enter_pread64", process_enter_read }, { "syscalls:sys_enter_readv", process_enter_read }, { "syscalls:sys_enter_preadv", process_enter_read }, { "syscalls:sys_enter_write", process_enter_write }, { "syscalls:sys_enter_pwrite64", process_enter_write }, { "syscalls:sys_enter_writev", process_enter_write }, { "syscalls:sys_enter_pwritev", process_enter_write }, { "syscalls:sys_enter_sync", process_enter_sync }, { "syscalls:sys_enter_sync_file_range", process_enter_sync }, { "syscalls:sys_enter_fsync", process_enter_sync }, { "syscalls:sys_enter_msync", process_enter_sync }, { "syscalls:sys_enter_recvfrom", process_enter_rx }, { "syscalls:sys_enter_recvmmsg", process_enter_rx }, { "syscalls:sys_enter_recvmsg", process_enter_rx }, { "syscalls:sys_enter_sendto", process_enter_tx }, { "syscalls:sys_enter_sendmsg", process_enter_tx }, { "syscalls:sys_enter_sendmmsg", process_enter_tx }, { "syscalls:sys_enter_epoll_pwait", process_enter_poll }, { 
"syscalls:sys_enter_epoll_wait", process_enter_poll }, { "syscalls:sys_enter_poll", process_enter_poll }, { "syscalls:sys_enter_ppoll", process_enter_poll }, { "syscalls:sys_enter_pselect6", process_enter_poll }, { "syscalls:sys_enter_select", process_enter_poll }, { "syscalls:sys_exit_read", process_exit_read }, { "syscalls:sys_exit_pread64", process_exit_read }, { "syscalls:sys_exit_readv", process_exit_read }, { "syscalls:sys_exit_preadv", process_exit_read }, { "syscalls:sys_exit_write", process_exit_write }, { "syscalls:sys_exit_pwrite64", process_exit_write }, { "syscalls:sys_exit_writev", process_exit_write }, { "syscalls:sys_exit_pwritev", process_exit_write }, { "syscalls:sys_exit_sync", process_exit_sync }, { "syscalls:sys_exit_sync_file_range", process_exit_sync }, { "syscalls:sys_exit_fsync", process_exit_sync }, { "syscalls:sys_exit_msync", process_exit_sync }, { "syscalls:sys_exit_recvfrom", process_exit_rx }, { "syscalls:sys_exit_recvmmsg", process_exit_rx }, { "syscalls:sys_exit_recvmsg", process_exit_rx }, { "syscalls:sys_exit_sendto", process_exit_tx }, { "syscalls:sys_exit_sendmsg", process_exit_tx }, { "syscalls:sys_exit_sendmmsg", process_exit_tx }, { "syscalls:sys_exit_epoll_pwait", process_exit_poll }, { "syscalls:sys_exit_epoll_wait", process_exit_poll }, { "syscalls:sys_exit_poll", process_exit_poll }, { "syscalls:sys_exit_ppoll", process_exit_poll }, { "syscalls:sys_exit_pselect6", process_exit_poll }, { "syscalls:sys_exit_select", process_exit_poll }, }; struct perf_data data = { .path = input_name, .mode = PERF_DATA_MODE_READ, .force = tchart->force, }; struct perf_session *session; int ret = -EINVAL; perf_tool__init(&tchart->tool, /*ordered_events=*/true); tchart->tool.comm = process_comm_event; tchart->tool.fork = process_fork_event; tchart->tool.exit = process_exit_event; tchart->tool.sample = process_sample_event; session = perf_session__new(&data, &tchart->tool); if (IS_ERR(session)) return PTR_ERR(session); symbol__init(&session->header.env); (void)perf_header__process_sections(&session->header, perf_data__fd(session->data), tchart, process_header); if (!perf_session__has_traces(session, "timechart record")) goto out_delete; if (perf_session__set_tracepoints_handlers(session, power_tracepoints)) { pr_err("Initializing session tracepoint handlers failed\n"); goto out_delete; } ret = perf_session__process_events(session); if (ret) goto out_delete; end_sample_processing(tchart); sort_pids(tchart); write_svg_file(tchart, output_name); pr_info("Written %2.1f seconds of trace to %s.\n", (tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name); out_delete: perf_session__delete(session); return ret; } static int timechart__io_record(int argc, const char **argv) { unsigned int rec_argc, i; const char **rec_argv; const char **p; char *filter = NULL; const char * const common_args[] = { "record", "-a", "-R", "-c", "1", }; unsigned int common_args_nr = ARRAY_SIZE(common_args); const char * const disk_events[] = { "syscalls:sys_enter_read", "syscalls:sys_enter_pread64", "syscalls:sys_enter_readv", "syscalls:sys_enter_preadv", "syscalls:sys_enter_write", "syscalls:sys_enter_pwrite64", "syscalls:sys_enter_writev", "syscalls:sys_enter_pwritev", "syscalls:sys_enter_sync", "syscalls:sys_enter_sync_file_range", "syscalls:sys_enter_fsync", "syscalls:sys_enter_msync", "syscalls:sys_exit_read", "syscalls:sys_exit_pread64", "syscalls:sys_exit_readv", "syscalls:sys_exit_preadv", "syscalls:sys_exit_write", "syscalls:sys_exit_pwrite64", 
"syscalls:sys_exit_writev", "syscalls:sys_exit_pwritev", "syscalls:sys_exit_sync", "syscalls:sys_exit_sync_file_range", "syscalls:sys_exit_fsync", "syscalls:sys_exit_msync", }; unsigned int disk_events_nr = ARRAY_SIZE(disk_events); const char * const net_events[] = { "syscalls:sys_enter_recvfrom", "syscalls:sys_enter_recvmmsg", "syscalls:sys_enter_recvmsg", "syscalls:sys_enter_sendto", "syscalls:sys_enter_sendmsg", "syscalls:sys_enter_sendmmsg", "syscalls:sys_exit_recvfrom", "syscalls:sys_exit_recvmmsg", "syscalls:sys_exit_recvmsg", "syscalls:sys_exit_sendto", "syscalls:sys_exit_sendmsg", "syscalls:sys_exit_sendmmsg", }; unsigned int net_events_nr = ARRAY_SIZE(net_events); const char * const poll_events[] = { "syscalls:sys_enter_epoll_pwait", "syscalls:sys_enter_epoll_wait", "syscalls:sys_enter_poll", "syscalls:sys_enter_ppoll", "syscalls:sys_enter_pselect6", "syscalls:sys_enter_select", "syscalls:sys_exit_epoll_pwait", "syscalls:sys_exit_epoll_wait", "syscalls:sys_exit_poll", "syscalls:sys_exit_ppoll", "syscalls:sys_exit_pselect6", "syscalls:sys_exit_select", }; unsigned int poll_events_nr = ARRAY_SIZE(poll_events); rec_argc = common_args_nr + disk_events_nr * 4 + net_events_nr * 4 + poll_events_nr * 4 + argc; rec_argv = calloc(rec_argc + 1, sizeof(char *)); if (rec_argv == NULL) return -ENOMEM; if (asprintf(&filter, "common_pid != %d", getpid()) < 0) { free(rec_argv); return -ENOMEM; } p = rec_argv; for (i = 0; i < common_args_nr; i++) *p++ = strdup(common_args[i]); for (i = 0; i < disk_events_nr; i++) { if (!is_valid_tracepoint(disk_events[i])) { rec_argc -= 4; continue; } *p++ = "-e"; *p++ = strdup(disk_events[i]); *p++ = "--filter"; *p++ = filter; } for (i = 0; i < net_events_nr; i++) { if (!is_valid_tracepoint(net_events[i])) { rec_argc -= 4; continue; } *p++ = "-e"; *p++ = strdup(net_events[i]); *p++ = "--filter"; *p++ = filter; } for (i = 0; i < poll_events_nr; i++) { if (!is_valid_tracepoint(poll_events[i])) { rec_argc -= 4; continue; } *p++ = "-e"; *p++ = strdup(poll_events[i]); *p++ = "--filter"; *p++ = filter; } for (i = 0; i < (unsigned int)argc; i++) *p++ = argv[i]; return cmd_record(rec_argc, rec_argv); } static int timechart__record(struct timechart *tchart, int argc, const char **argv) { unsigned int rec_argc, i, j; const char **rec_argv; const char **p; unsigned int record_elems; const char * const common_args[] = { "record", "-a", "-R", "-c", "1", }; unsigned int common_args_nr = ARRAY_SIZE(common_args); const char * const backtrace_args[] = { "-g", }; unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args); const char * const power_args[] = { "-e", "power:cpu_frequency", "-e", "power:cpu_idle", }; unsigned int power_args_nr = ARRAY_SIZE(power_args); const char * const old_power_args[] = { #ifdef SUPPORT_OLD_POWER_EVENTS "-e", "power:power_start", "-e", "power:power_end", "-e", "power:power_frequency", #endif }; unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args); const char * const tasks_args[] = { "-e", "sched:sched_wakeup", "-e", "sched:sched_switch", }; unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args); #ifdef SUPPORT_OLD_POWER_EVENTS if (!is_valid_tracepoint("power:cpu_idle") && is_valid_tracepoint("power:power_start")) { use_old_power_events = 1; power_args_nr = 0; } else { old_power_args_nr = 0; } #endif if (tchart->power_only) tasks_args_nr = 0; if (tchart->tasks_only) { power_args_nr = 0; old_power_args_nr = 0; } if (!tchart->with_backtrace) backtrace_args_no = 0; record_elems = common_args_nr + tasks_args_nr + power_args_nr + old_power_args_nr + 
backtrace_args_no; rec_argc = record_elems + argc; rec_argv = calloc(rec_argc + 1, sizeof(char *)); if (rec_argv == NULL) return -ENOMEM; p = rec_argv; for (i = 0; i < common_args_nr; i++) *p++ = strdup(common_args[i]); for (i = 0; i < backtrace_args_no; i++) *p++ = strdup(backtrace_args[i]); for (i = 0; i < tasks_args_nr; i++) *p++ = strdup(tasks_args[i]); for (i = 0; i < power_args_nr; i++) *p++ = strdup(power_args[i]); for (i = 0; i < old_power_args_nr; i++) *p++ = strdup(old_power_args[i]); for (j = 0; j < (unsigned int)argc; j++) *p++ = argv[j]; return cmd_record(rec_argc, rec_argv); } static int parse_process(const struct option *opt __maybe_unused, const char *arg, int __maybe_unused unset) { if (arg) add_process_filter(arg); return 0; } static int parse_highlight(const struct option *opt __maybe_unused, const char *arg, int __maybe_unused unset) { unsigned long duration = strtoul(arg, NULL, 0); if (svg_highlight || svg_highlight_name) return -1; if (duration) svg_highlight = duration; else svg_highlight_name = strdup(arg); return 0; } static int parse_time(const struct option *opt, const char *arg, int __maybe_unused unset) { char unit = 'n'; u64 *value = opt->value; if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) { switch (unit) { case 'm': *value *= NSEC_PER_MSEC; break; case 'u': *value *= NSEC_PER_USEC; break; case 'n': break; default: return -1; } } return 0; } int cmd_timechart(int argc, const char **argv) { struct timechart tchart = { .proc_num = 15, .min_time = NSEC_PER_MSEC, .merge_dist = 1000, }; const char *output_name = "output.svg"; const struct option timechart_common_options[] = { OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"), OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"), OPT_END() }; const struct option timechart_options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_STRING('o', "output", &output_name, "file", "output file name"), OPT_INTEGER('w', "width", &svg_page_width, "page width"), OPT_CALLBACK(0, "highlight", NULL, "duration or task name", "highlight tasks. Pass duration in ns or process name.", parse_highlight), OPT_CALLBACK('p', "process", NULL, "process", "process selector. Pass a pid or process name.", parse_process), OPT_CALLBACK(0, "symfs", NULL, "directory", "Look for files with symbols relative to this directory", symbol__config_symfs), OPT_INTEGER('n', "proc-num", &tchart.proc_num, "min. 
number of tasks to print"), OPT_BOOLEAN('t', "topology", &tchart.topology, "sort CPUs according to topology"), OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain, "skip EAGAIN errors"), OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time", "all IO faster than min-time will visually appear longer", parse_time), OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time", "merge events that are merge-dist us apart", parse_time), OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"), OPT_PARENT(timechart_common_options), }; const char * const timechart_subcommands[] = { "record", NULL }; const char *timechart_usage[] = { "perf timechart [<options>] {record}", NULL }; const struct option timechart_record_options[] = { OPT_BOOLEAN('I', "io-only", &tchart.io_only, "record only IO data"), OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"), OPT_PARENT(timechart_common_options), }; const char * const timechart_record_usage[] = { "perf timechart record [<options>]", NULL }; int ret; cpus_cstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_cstate_start_times)); if (!cpus_cstate_start_times) return -ENOMEM; cpus_cstate_state = calloc(MAX_CPUS, sizeof(*cpus_cstate_state)); if (!cpus_cstate_state) { ret = -ENOMEM; goto out; } cpus_pstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_pstate_start_times)); if (!cpus_pstate_start_times) { ret = -ENOMEM; goto out; } cpus_pstate_state = calloc(MAX_CPUS, sizeof(*cpus_pstate_state)); if (!cpus_pstate_state) { ret = -ENOMEM; goto out; } argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands, timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (tchart.power_only && tchart.tasks_only) { pr_err("-P and -T options cannot be used at the same time.\n"); ret = -1; goto out; } if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) { argc = parse_options(argc, argv, timechart_record_options, timechart_record_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (tchart.power_only && tchart.tasks_only) { pr_err("-P and -T options cannot be used at the same time.\n"); ret = -1; goto out; } if (tchart.io_only) ret = timechart__io_record(argc, argv); else ret = timechart__record(&tchart, argc, argv); goto out; } else if (argc) usage_with_options(timechart_usage, timechart_options); setup_pager(); ret = __cmd_timechart(&tchart, output_name); out: zfree(&cpus_cstate_start_times); zfree(&cpus_cstate_state); zfree(&cpus_pstate_start_times); zfree(&cpus_pstate_state); return ret; }