Contributors: 6
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Miroslav Benes |
136 |
63.85% |
1 |
10.00% |
Vasily Gorbik |
29 |
13.62% |
1 |
10.00% |
Martin Schwidefsky |
25 |
11.74% |
1 |
10.00% |
Heiko Carstens |
18 |
8.45% |
5 |
50.00% |
Pratyush Anand |
4 |
1.88% |
1 |
10.00% |
Greg Kroah-Hartman |
1 |
0.47% |
1 |
10.00% |
Total |
213 |
100.00% |
10 |
100.00% |
// SPDX-License-Identifier: GPL-2.0
/*
* Stack trace management functions
*
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
/*
 * Feed every return address found by the unwinder to @consume_entry,
 * stopping when no further frames exist or the consumer declines more
 * entries. @task/@regs select which context to unwind.
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long ip;

	unwind_for_each_frame(&state, task, regs, 0) {
		ip = unwind_get_return_address(&state);
		if (!ip)
			break;
		if (!consume_entry(cookie, ip, false))
			break;
	}
}
/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long ip;

	unwind_for_each_frame(&state, task, NULL, 0) {
		/*
		 * Only plain task-stack frames without saved pt_regs can
		 * be trusted; anything else makes the trace unreliable.
		 */
		if (state.stack_info.type != STACK_TYPE_TASK || state.regs)
			return -EINVAL;

		ip = unwind_get_return_address(&state);
		if (!ip)
			return -EINVAL;

#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	return unwind_error(&state) ? -EINVAL : 0;
}