Linux 4.14: arch/x86/kernel/stacktrace.c
/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

static int save_stack_address(struct stack_trace *trace, unsigned long addr,
                              bool nosched)
{
        if (nosched && in_sched_functions(addr))
                return 0;

        if (trace->skip > 0) {
                trace->skip--;
                return 0;
        }

        if (trace->nr_entries >= trace->max_entries)
                return -1;

        trace->entries[trace->nr_entries++] = addr;
        return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Ingo Molnar | 25 | 32.47% | 1 | 14.29% |
Andi Kleen | 18 | 23.38% | 1 | 14.29% |
Alexei Starovoitov | 11 | 14.29% | 1 | 14.29% |
Oleg Nesterov | 11 | 14.29% | 1 | 14.29% |
Josh Poimboeuf | 9 | 11.69% | 1 | 14.29% |
Vegard Nossum | 2 | 2.60% | 1 | 14.29% |
Arjan van de Ven | 1 | 1.30% | 1 | 14.29% |
Total | 77 | 100.00% | 7 | 100.00% |
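The skip/overflow bookkeeping above is self-contained, so it can be demonstrated outside the kernel. A minimal userspace sketch; the struct and helper below are local re-declarations for illustration, not the kernel's definitions:

#include <stdio.h>

/* Illustration only: a local stand-in for the kernel's struct stack_trace. */
struct stack_trace {
        unsigned int nr_entries, max_entries;
        unsigned long *entries;
        int skip;
};

static int save_addr(struct stack_trace *t, unsigned long addr)
{
        if (t->skip > 0) {
                t->skip--;      /* drop leading entries the caller asked to hide */
                return 0;
        }
        if (t->nr_entries >= t->max_entries)
                return -1;      /* signals the unwind loop to stop */
        t->entries[t->nr_entries++] = addr;
        return 0;
}

int main(void)
{
        unsigned long buf[2];
        struct stack_trace t = { .max_entries = 2, .entries = buf, .skip = 1 };

        save_addr(&t, 0x1000);  /* consumed by skip, not stored */
        save_addr(&t, 0x2000);  /* stored at entries[0] */
        save_addr(&t, 0x3000);  /* stored at entries[1] */
        printf("%u stored, first=%#lx\n", t.nr_entries, t.entries[0]);
        /* prints: 2 stored, first=0x2000 */
        return 0;
}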
static void __save_stack_trace(struct stack_trace *trace,
                               struct task_struct *task, struct pt_regs *regs,
                               bool nosched)
{
        struct unwind_state state;
        unsigned long addr;

        if (regs)
                save_stack_address(trace, regs->ip, nosched);

        for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || save_stack_address(trace, addr, nosched))
                        break;
        }

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Josh Poimboeuf | 75 | 61.98% | 1 | 12.50% |
Ingo Molnar | 16 | 13.22% | 1 | 12.50% |
Catalin Marinas | 10 | 8.26% | 1 | 12.50% |
Arjan van de Ven | 9 | 7.44% | 2 | 25.00% |
Oleg Nesterov | 7 | 5.79% | 1 | 12.50% |
Andi Kleen | 3 | 2.48% | 1 | 12.50% |
Namhyung Kim | 1 | 0.83% | 1 | 12.50% |
Total | 121 | 100.00% | 8 | 100.00% |
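Note that the ULONG_MAX terminator is stored as a regular entry and counted in nr_entries, so consumers should treat it as end-of-trace. A hedged consumer sketch (print_trace is a hypothetical name, not part of this file):

/* Hypothetical consumer: print a captured trace, stopping at the
 * ULONG_MAX terminator that __save_stack_trace appends when room allows. */
static void print_trace(const struct stack_trace *trace)
{
        unsigned int i;

        for (i = 0; i < trace->nr_entries; i++) {
                if (trace->entries[i] == ULONG_MAX)
                        break;
                pr_info("  [<%p>] %pS\n", (void *)trace->entries[i],
                        (void *)trace->entries[i]);
        }
}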
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
        __save_stack_trace(trace, current, NULL, false);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Josh Poimboeuf | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(save_stack_trace);
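As a usage sketch (the caller below is hypothetical; struct stack_trace comes from <linux/stacktrace.h>): the caller supplies the buffer, and skip discards the innermost frames, whose exact count depends on call depth:

/* Hypothetical caller: capture up to 16 return addresses of the current
 * task, hiding the two innermost frames (an assumed tuning value). */
static void example_capture(void)
{
        unsigned long entries[16];
        struct stack_trace trace = {
                .max_entries    = ARRAY_SIZE(entries),
                .entries        = entries,
                .skip           = 2,
        };

        save_stack_trace(&trace);
        /* entries[0..nr_entries-1] now holds return addresses, terminated
         * with ULONG_MAX if there was room left in the buffer. */
}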
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        __save_stack_trace(trace, current, regs, false);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Vegard Nossum | 14 | 53.85% | 1 | 25.00% |
Masami Hiramatsu | 4 | 15.38% | 1 | 25.00% |
Soeren Sandmann Pedersen | 4 | 15.38% | 1 | 25.00% |
Josh Poimboeuf | 4 | 15.38% | 1 | 25.00% |
Total | 26 | 100.00% | 4 | 100.00% |
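Because the caller passes its own pt_regs, this variant records the interrupted context exactly: regs->ip goes in first, then the frames beneath it. A sketch from a hypothetical interrupt or exception path (capture_at_interrupt is an assumed name):

/* Hypothetical: snapshot the context that an NMI/IRQ interrupted. */
static void capture_at_interrupt(struct pt_regs *regs, unsigned long *buf,
                                 unsigned int max)
{
        struct stack_trace trace = {
                .max_entries    = max,
                .entries        = buf,
        };

        save_stack_trace_regs(regs, &trace);
}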
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        if (!try_get_task_stack(tsk))
                return;

        __save_stack_trace(trace, tsk, NULL, true);

        put_task_stack(tsk);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Arjan van de Ven | 22 | 55.00% | 1 | 33.33% |
Andrew Lutomirski | 14 | 35.00% | 1 | 33.33% |
Josh Poimboeuf | 4 | 10.00% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
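A sketch for the remote-task case (show_blocked_task is a hypothetical name). try_get_task_stack()/put_task_stack() inside save_stack_trace_tsk pin the possibly vmalloc'ed stack for the duration of the walk, and the nosched flag hides scheduler internals so the top entry is where the task actually blocked:

/* Hypothetical: snapshot where a sleeping task is blocked. */
static void show_blocked_task(struct task_struct *tsk)
{
        unsigned long entries[32];
        struct stack_trace trace = {
                .max_entries    = ARRAY_SIZE(entries),
                .entries        = entries,
        };

        save_stack_trace_tsk(tsk, &trace);
}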
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
#define STACKTRACE_DUMP_ONCE(task) ({                          \
        static bool __section(.data.unlikely) __dumped;        \
                                                                \
        if (!__dumped) {                                        \
                __dumped = true;                                \
                WARN_ON(1);                                     \
                show_stack(task, NULL);                         \
        }                                                       \
})
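The macro is an open-coded once-latch: a static flag, placed in .data.unlikely to keep this rarely-touched data out of hot cachelines, ensures the warning and stack dump fire at most once per call site. The same pattern, generically (a sketch only, distinct from the kernel's own DO_ONCE/WARN_ONCE helpers):

/* Sketch of the once-per-callsite latch pattern used above. */
#define RUN_ONCE_SKETCH(stmt) ({                \
        static bool __done;                     \
        if (!__done) {                          \
                __done = true;                  \
                stmt;                           \
        }                                       \
})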
static int __save_stack_trace_reliable(struct stack_trace *trace,
                                       struct task_struct *task)
{
        struct unwind_state state;
        struct pt_regs *regs;
        unsigned long addr;

        for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {

                regs = unwind_get_entry_regs(&state);
                if (regs) {
                        /*
                         * Kernel mode registers on the stack indicate an
                         * in-kernel interrupt or exception (e.g., preemption
                         * or a page fault), which can make frame pointers
                         * unreliable.
                         */
                        if (!user_mode(regs))
                                return -EINVAL;

                        /*
                         * The last frame contains the user mode syscall
                         * pt_regs.  Skip it and finish the unwind.
                         */
                        unwind_next_frame(&state);
                        if (!unwind_done(&state)) {
                                STACKTRACE_DUMP_ONCE(task);
                                return -EINVAL;
                        }

                        break;
                }

                addr = unwind_get_return_address(&state);

                /*
                 * A NULL or invalid return address probably means there's some
                 * generated code which __kernel_text_address() doesn't know
                 * about.
                 */
                if (!addr) {
                        STACKTRACE_DUMP_ONCE(task);
                        return -EINVAL;
                }

                if (save_stack_address(trace, addr, false))
                        return -EINVAL;
        }

        /* Check for stack corruption */
        if (unwind_error(&state)) {
                STACKTRACE_DUMP_ONCE(task);
                return -EINVAL;
        }

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;

        return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Josh Poimboeuf | 198 | 100.00% | 1 | 100.00% |
Total | 198 | 100.00% | 1 | 100.00% |
/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
                                  struct stack_trace *trace)
{
        int ret;

        if (!try_get_task_stack(tsk))
                return -EINVAL;

        ret = __save_stack_trace_reliable(trace, tsk);

        put_task_stack(tsk);

        return ret;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Josh Poimboeuf | 47 | 100.00% | 1 | 100.00% |
Total | 47 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
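The main consumer of the reliable variant is live patching, which must refuse to transition a task whose stack cannot be fully verified. A hedged sketch of that pattern (task_stack_is_safe is a hypothetical name; a real caller would itself live under CONFIG_HAVE_RELIABLE_STACKTRACE):

/* Hypothetical: treat any error as "cannot prove this task is safe"
 * and let the caller retry later, rather than trusting a partial trace. */
static bool task_stack_is_safe(struct task_struct *tsk, unsigned long *buf,
                               unsigned int max)
{
        struct stack_trace trace = {
                .max_entries    = max,
                .entries        = buf,
        };

        return save_stack_trace_tsk_reliable(tsk, &trace) == 0;
}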
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
        const void __user *next_fp;
        unsigned long ret_addr;
};
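The struct mirrors the layout of a user frame built with frame pointers enabled (an assumption about how the user code was compiled, not something this file can enforce): the saved frame pointer sits at the address fp points to, with the return address just above it, so one copy of sizeof(*frame) bytes fetches both fields.

/*
 * Illustration of the assumed frame layout (stack grows down):
 *
 *        +------------------+
 *        | ret_addr         |  fp + sizeof(long): return address into caller
 *        +------------------+
 *  fp -> | next_fp          |  fp + 0: the caller's saved frame pointer
 *        +------------------+
 *        | locals ...       |
 */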
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
        int ret;

        if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
                return 0;

        ret = 1;
        pagefault_disable();
        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
                ret = 0;
        pagefault_enable();

        return ret;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Török Edwin | 70 | 98.59% | 1 | 50.00% |
Frédéric Weisbecker | 1 | 1.41% | 1 | 50.00% |
Total | 71 | 100.00% | 2 | 100.00% |
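The access_ok() + pagefault_disable() + __copy_from_user_inatomic() sequence is the standard no-fault user read: if the page is not resident, the copy fails immediately instead of sleeping in the fault handler, which makes it safe from atomic context. The same pattern as a standalone sketch (peek_user_long is a hypothetical helper):

/* Hypothetical: read one word from userspace without risking a sleep. */
static int peek_user_long(const void __user *p, unsigned long *val)
{
        int ret = 0;

        if (!access_ok(VERIFY_READ, p, sizeof(*val)))
                return -EFAULT;

        pagefault_disable();
        if (__copy_from_user_inatomic(val, p, sizeof(*val)))
                ret = -EFAULT;
        pagefault_enable();

        return ret;
}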
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
        const struct pt_regs *regs = task_pt_regs(current);
        const void __user *fp = (const void __user *)regs->bp;

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = regs->ip;

        while (trace->nr_entries < trace->max_entries) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                if ((unsigned long)fp < regs->sp)
                        break;
                if (frame.ret_addr) {
                        trace->entries[trace->nr_entries++] =
                                frame.ret_addr;
                }
                if (fp == frame.next_fp)
                        break;
                fp = frame.next_fp;
        }
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Török Edwin | 152 | 99.35% | 2 | 66.67% |
Frédéric Weisbecker | 1 | 0.65% | 1 | 33.33% |
Total | 153 | 100.00% | 3 | 100.00% |
void save_stack_trace_user(struct stack_trace *trace)
{
        /*
         * Trace user stack if we are not a kernel thread
         */
        if (current->mm) {
                __save_stack_trace_user(trace);
        }

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Török Edwin | 46 | 100.00% | 2 | 100.00% |
Total | 46 | 100.00% | 2 | 100.00% |
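A usage sketch mirroring the kernel-side example above (capture_user_stack is a hypothetical name; it must run in process context so that current has user mappings):

/* Hypothetical: capture the user-space call chain that led into the kernel. */
static unsigned int capture_user_stack(unsigned long *buf, unsigned int max)
{
        struct stack_trace trace = {
                .max_entries    = max,
                .entries        = buf,
        };

        save_stack_trace_user(&trace);
        return trace.nr_entries;        /* may count a ULONG_MAX terminator */
}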
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Josh Poimboeuf | 376 | 43.42% | 2 | 8.33% |
Török Edwin | 286 | 33.03% | 2 | 8.33% |
Ingo Molnar | 66 | 7.62% | 5 | 20.83% |
Arjan van de Ven | 32 | 3.70% | 3 | 12.50% |
Andi Kleen | 24 | 2.77% | 1 | 4.17% |
Oleg Nesterov | 18 | 2.08% | 1 | 4.17% |
Vegard Nossum | 16 | 1.85% | 2 | 8.33% |
Andrew Lutomirski | 14 | 1.62% | 1 | 4.17% |
Alexei Starovoitov | 11 | 1.27% | 1 | 4.17% |
Catalin Marinas | 10 | 1.15% | 1 | 4.17% |
Masami Hiramatsu | 4 | 0.46% | 1 | 4.17% |
Soeren Sandmann Pedersen | 4 | 0.46% | 1 | 4.17% |
Frédéric Weisbecker | 3 | 0.35% | 1 | 4.17% |
Paul Gortmaker | 1 | 0.12% | 1 | 4.17% |
Namhyung Kim | 1 | 0.12% | 1 | 4.17% |
Total | 866 | 100.00% | 24 | 100.00% |