Contributors: 8

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
| :--- | ---: | ---: | ---: | ---: |
| Palmer Dabbelt | 546 | 73.29% | 2 | 14.29% |
| Kefeng Wang | 75 | 10.07% | 4 | 28.57% |
| Chen Huang | 59 | 7.92% | 2 | 14.29% |
| Alan Kao | 27 | 3.62% | 1 | 7.14% |
| Dmitry Safonov | 22 | 2.95% | 2 | 14.29% |
| Guenter Roeck | 9 | 1.21% | 1 | 7.14% |
| Christoph Hellwig | 5 | 0.67% | 1 | 7.14% |
| Thomas Gleixner | 2 | 0.27% | 1 | 7.14% |
| Total | 745 | | 14 | |

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 ARM Limited
* Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
#include <asm/stacktrace.h>
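
/*
 * Global register variable bound to the stack pointer, used by the
 * frame-pointer-less unwinder below to read the current kernel sp.
 */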
register unsigned long sp_in_global __asm__("sp");
#ifdef CONFIG_FRAME_POINTER
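
/*
 * Frame-pointer unwinder: follow the saved fp/ra pairs up the stack,
 * calling fn(arg, pc) for each frame until fn returns false, pc leaves
 * kernel text, or the frame pointer leaves the task's stack.
 */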
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
if (regs) {
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
fp = (unsigned long)__builtin_frame_address(1);
sp = (unsigned long)__builtin_frame_address(0);
pc = (unsigned long)__builtin_return_address(0);
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
sp = task->thread.sp;
pc = task->thread.ra;
}
for (;;) {
unsigned long low, high;
struct stackframe *frame;
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
break;
/* Validate frame pointer */
low = sp + sizeof(struct stackframe);
high = ALIGN(sp, THREAD_SIZE);
if (unlikely(fp < low || fp > high || fp & 0x7))
break;
/* Unwind stack frame */
frame = (struct stackframe *)fp - 1;
sp = fp;
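		/*
		 * The function that took the exception may not have saved ra
		 * on its stack yet (leaf function); a misaligned fp slot
		 * indicates this layout, in which case the ra slot holds the
		 * caller's fp and the return address comes from regs->ra.
		 */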
if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
fp = frame->ra;
pc = regs->ra;
} else {
fp = frame->fp;
pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
(unsigned long *)(fp - 8));
}
}
}
#else /* !CONFIG_FRAME_POINTER */
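
/*
 * Fallback unwinder: without frame pointers, scan the kernel stack word by
 * word and report every value that looks like a kernel text address. Less
 * precise than frame-pointer unwinding and may include stale addresses.
 */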
void notrace walk_stackframe(struct task_struct *task,
struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long sp, pc;
unsigned long *ksp;
if (regs) {
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
sp = sp_in_global;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
sp = task->thread.sp;
pc = task->thread.ra;
}
if (unlikely(sp & 0x7))
return;
ksp = (unsigned long *)sp;
while (!kstack_end(ksp)) {
if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
break;
pc = (*ksp++) - 0x4;
}
}
#endif /* CONFIG_FRAME_POINTER */
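
/* walk_stackframe() callback: print one return address at the given log level. */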
static bool print_trace_address(void *arg, unsigned long pc)
{
const char *loglvl = arg;
print_ip_sym(loglvl, pc);
return true;
}
noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl)
{
walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
pr_cont("%sCall Trace:\n", loglvl);
dump_backtrace(NULL, task, loglvl);
}
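
/*
 * walk_stackframe() callback for get_wchan(): remember the first pc that is
 * not in the scheduler and stop the walk by returning false.
 */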
static bool save_wchan(void *arg, unsigned long pc)
{
if (!in_sched_functions(pc)) {
unsigned long *p = arg;
*p = pc;
return false;
}
return true;
}
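
/* Report where a blocked task is sleeping, e.g. for /proc/<pid>/wchan. */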
unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
if (likely(task && task != current && task->state != TASK_RUNNING))
walk_stackframe(task, NULL, save_wchan, &pc);
return pc;
}
#ifdef CONFIG_STACKTRACE
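
/*
 * Hook for the generic stacktrace code: hand every unwound return address
 * to consume_entry.
 */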
noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
}
#endif /* CONFIG_STACKTRACE */