Contributors: 16
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Josh Poimboeuf | 182 | 36.99% | 4 | 14.81% |
| Török Edwin | 166 | 33.74% | 2 | 7.41% |
| Thomas Gleixner | 52 | 10.57% | 2 | 7.41% |
| Al Viro | 18 | 3.66% | 1 | 3.70% |
| Ingo Molnar | 18 | 3.66% | 4 | 14.81% |
| Arjan van de Ven | 15 | 3.05% | 3 | 11.11% |
| Jiri Slaby | 10 | 2.03% | 2 | 7.41% |
| Oleg Nesterov | 9 | 1.83% | 1 | 3.70% |
| Andi Kleen | 8 | 1.63% | 1 | 3.70% |
| Frédéric Weisbecker | 3 | 0.61% | 1 | 3.70% |
| Steven Rostedt | 3 | 0.61% | 1 | 3.70% |
| Vegard Nossum | 2 | 0.41% | 1 | 3.70% |
| Arnd Bergmann | 2 | 0.41% | 1 | 3.70% |
| Eiichi Tsukata | 2 | 0.41% | 1 | 3.70% |
| Paul Gortmaker | 1 | 0.20% | 1 | 3.70% |
| Namhyung Kim | 1 | 0.20% | 1 | 3.70% |
| Total | 492 | | 27 | |
/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs)
{
        struct unwind_state state;
        unsigned long addr;

        if (regs && !consume_entry(cookie, regs->ip))
                return;

        for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || !consume_entry(cookie, addr))
                        break;
        }
}
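
/*
 * Illustration (not part of this file): arch_stack_walk() is normally driven
 * by the generic stack trace helpers in kernel/stacktrace.c. The sketch below
 * shows the shape of such a caller; the names trace_buf, collect_entry and
 * dump_current_stack are invented for this example.
 */
struct trace_buf {
        unsigned long   *entries;
        unsigned int    len;
        unsigned int    max;
};

/* Matches stack_trace_consume_fn: return false to stop the walk early. */
static bool collect_entry(void *cookie, unsigned long addr)
{
        struct trace_buf *buf = cookie;

        if (buf->len >= buf->max)
                return false;
        buf->entries[buf->len++] = addr;
        return true;
}

static void dump_current_stack(void)
{
        unsigned long entries[16];
        struct trace_buf buf = { .entries = entries, .max = ARRAY_SIZE(entries) };
        unsigned int i;

        /* Walk the current task's own stack; no pt_regs are needed here. */
        arch_stack_walk(collect_entry, &buf, current, NULL);

        for (i = 0; i < buf.len; i++)
                pr_info("%pS\n", (void *)entries[i]);
}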

int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                             void *cookie, struct task_struct *task)
{
        struct unwind_state state;
        struct pt_regs *regs;
        unsigned long addr;

        for (unwind_start(&state, task, NULL, NULL);
             !unwind_done(&state) && !unwind_error(&state);
             unwind_next_frame(&state)) {

                regs = unwind_get_entry_regs(&state, NULL);
                if (regs) {
                        /* Success path for user tasks */
                        if (user_mode(regs))
                                return 0;

                        /*
                         * Kernel mode registers on the stack indicate an
                         * in-kernel interrupt or exception (e.g., preemption
                         * or a page fault), which can make frame pointers
                         * unreliable.
                         */
                        if (IS_ENABLED(CONFIG_FRAME_POINTER))
                                return -EINVAL;
                }

                addr = unwind_get_return_address(&state);

                /*
                 * A NULL or invalid return address probably means there's some
                 * generated code which __kernel_text_address() doesn't know
                 * about.
                 */
                if (!addr)
                        return -EINVAL;

                if (!consume_entry(cookie, addr))
                        return -EINVAL;
        }

        /* Check for stack corruption */
        if (unwind_error(&state))
                return -EINVAL;

        return 0;
}

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
        const void __user       *next_fp;
        unsigned long           ret_addr;
};

static int
copy_stack_frame(const struct stack_frame_user __user *fp,
                 struct stack_frame_user *frame)
{
        int ret;

        if (!__access_ok(fp, sizeof(*frame)))
                return 0;

        ret = 1;
        pagefault_disable();
        if (__get_user(frame->next_fp, &fp->next_fp) ||
            __get_user(frame->ret_addr, &fp->ret_addr))
                ret = 0;
        pagefault_enable();

        return ret;
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
                          const struct pt_regs *regs)
{
        const void __user *fp = (const void __user *)regs->bp;

        if (!consume_entry(cookie, regs->ip))
                return;

        while (1) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                if ((unsigned long)fp < regs->sp)
                        break;
                if (!frame.ret_addr)
                        break;
                if (!consume_entry(cookie, frame.ret_addr))
                        break;
                fp = frame.next_fp;
        }
}
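
The user-space walker above relies on the application having been built with
frame pointers: each call frame stores the caller's frame pointer with the
return address directly above it, which is exactly the pair that struct
stack_frame_user describes, and regs->bp points at the most recent such pair.
As a rough, hypothetical user-space analogue (ordinary C, compiled with
-fno-omit-frame-pointer on x86-64; the names frame and walk_own_stack are
invented here), a program can walk its own chain the same way:

#include <stdio.h>
#include <stdint.h>

struct frame {
        struct frame    *next_fp;       /* saved caller frame pointer */
        uintptr_t       ret_addr;       /* return address pushed by the call */
};

static void walk_own_stack(void)
{
        /* Frame pointer of this function; the layout mirrors stack_frame_user. */
        struct frame *fp = __builtin_frame_address(0);

        while (fp && fp->ret_addr) {
                printf("return address: %#lx\n", (unsigned long)fp->ret_addr);
                fp = fp->next_fp;
        }
}

int main(void)
{
        walk_own_stack();
        return 0;
}

Unlike this toy loop, the kernel version also rejects any frame pointer that
falls below regs->sp and stops as soon as reading a frame faults, since it
cannot trust user memory.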