Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Arnaldo Carvalho de Melo | 499 | 23.04% | 32 | 42.11% |
Frédéric Weisbecker | 408 | 18.84% | 8 | 10.53% |
Kan Liang | 329 | 15.19% | 4 | 5.26% |
Andi Kleen | 227 | 10.48% | 3 | 3.95% |
Jiri Olsa | 222 | 10.25% | 4 | 5.26% |
Adrian Hunter | 172 | 7.94% | 11 | 14.47% |
Hari Bathini | 168 | 7.76% | 1 | 1.32% |
Namhyung Kim | 63 | 2.91% | 4 | 5.26% |
He Kuang | 23 | 1.06% | 2 | 2.63% |
David S. Miller | 19 | 0.88% | 2 | 2.63% |
Krister Johansen | 17 | 0.78% | 1 | 1.32% |
David Ahern | 12 | 0.55% | 1 | 1.32% |
John Keeping | 3 | 0.14% | 1 | 1.32% |
Elena Reshetova | 3 | 0.14% | 1 | 1.32% |
Greg Kroah-Hartman | 1 | 0.05% | 1 | 1.32% |
Total | 2166 | 100.00% | 76 | 100.00% |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"
#include <api/fs/fs.h>

int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->maps = maps__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread->maps = maps__get(leader->maps);
			thread__put(leader);
		}
	}

	return thread->maps ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		thread->guest_cpu = -1;
		thread->lbr_stitch_enable = false;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);
		init_rwsem(&thread->namespaces_lock);
		init_rwsem(&thread->comm_lock);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
		/* Thread holds first ref to nsdata. */
		thread->nsinfo = nsinfo__new(pid);
		srccode_state_init(&thread->srccode_state);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->maps) {
		maps__put(thread->maps);
		thread->maps = NULL;
	}
	down_write(&thread->namespaces_lock);
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(&thread->namespaces_lock);

	down_write(&thread->comm_lock);
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(&thread->comm_lock);

	nsinfo__zput(thread->nsinfo);
	srccode_state_free(&thread->srccode_state);

	exit_rwsem(&thread->namespaces_lock);
	exit_rwsem(&thread->comm_lock);

	thread__free_stitch_list(thread);

	free(thread);
}

struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead threads list, as last reference is
		 * gone, if it is in a dead threads list.
		 *
		 * We may not be there anymore if say, the machine where it was
		 * stored was already deleted, so we already removed it from
		 * the dead threads and some other piece of code still keeps a
		 * reference.
		 *
		 * This is what 'perf sched' does and finally drops it in
		 * perf_sched__lat(), where it calls perf_sched__read_events(),
		 * that processes the events by creating a session and deleting
		 * it, which ends up destroying the list heads for the dead
		 * threads, but before it does that it removes all threads from
		 * it using list_del_init().
		 *
		 * So we need to check here if it is in a dead threads list and
		 * if so, remove it before finally deleting the thread, to avoid
		 * an use after free situation.
		 */
		if (!list_empty(&thread->node))
			list_del_init(&thread->node);
		thread__delete(thread);
	}
}

static struct namespaces *__thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(&thread->namespaces_lock);
	ns = __thread__namespaces(thread);
	up_read(&thread->namespaces_lock);

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * setns syscall must have changed few or all the namespaces
		 * of this thread. Update end time for the namespaces
		 * previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(&thread->namespaces_lock);
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(&thread->namespaces_lock);

	return ret;
}

struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread->pid_ == thread->tid)
		return second_last;

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);

		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread->maps);
	}

	thread->comm_set = true;

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(&thread->comm_lock);
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(&thread->comm_lock);

	return ret;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(&thread->comm_lock);
	str = __thread__comm_str(thread);
	up_read(&thread->comm_lock);

	return str;
}

/* CHECKME: it should probably better return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);

		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       maps__fprintf(thread->maps, fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread->maps, map, NULL);
	if (ret)
		return ret;

	maps__fixup_overlappings(thread->maps, map, stderr);
	maps__insert(thread->maps, map);

	return 0;
}

static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = thread->maps;
	struct map *map;

	down_read(&maps->lock);

	maps__for_each_entry(maps, map) {
		err = unwind__prepare_access(thread->maps, map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(&maps->lock);

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (dwarf_callchain_users)
		err = __thread__prepare_access(thread);

	return err;
}

static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is new thread, we share map groups for process. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->maps == parent->maps) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}
	/* But this one is new process, copy maps. */
	return do_maps_clone ? maps__clone(thread, parent->maps) : 0;
}

int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;

		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_maps(thread, parent, do_maps_clone);
}

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}

int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
	    al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
	    map__load(al.map) < 0)
		return -1;

	offset = al.map->map_ip(al.map, ip);
	if (is64bit)
		*is64bit = al.map->dso->is_64_bit;

	return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
}

void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	zfree(&lbr_stitch->prev_lbr_cursor);
	zfree(&thread->lbr_stitch);
}
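The lifecycle of a struct thread in the listing above is governed by reference counting: thread__new() sets the count to 1, thread__get() takes an additional reference, and thread__put() releases one, calling thread__delete() when the last reference is dropped. The following is a minimal standalone sketch of that get/put pattern, not perf code: the struct, the object__* names, and the use of C11 atomics in place of the kernel's refcount_t are assumptions made purely for illustration.

/*
 * Hypothetical standalone analogue of the thread__get()/thread__put()
 * refcounting pattern; C11 atomics stand in for the kernel's refcount_t.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int refcnt;
	int id;
};

static struct object *object__new(int id)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj) {
		obj->id = id;
		atomic_init(&obj->refcnt, 1);	/* creator holds the first reference */
	}
	return obj;
}

static struct object *object__get(struct object *obj)
{
	if (obj)
		atomic_fetch_add(&obj->refcnt, 1);
	return obj;
}

static void object__put(struct object *obj)
{
	/* Last reference gone: free, as thread__put() does via thread__delete(). */
	if (obj && atomic_fetch_sub(&obj->refcnt, 1) == 1) {
		printf("freeing object %d\n", obj->id);
		free(obj);
	}
}

int main(void)
{
	struct object *obj = object__new(42);	/* refcnt == 1 */
	struct object *ref = object__get(obj);	/* refcnt == 2 */

	object__put(ref);			/* refcnt == 1 */
	object__put(obj);			/* refcnt == 0 -> freed */
	return 0;
}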