Linux release 4.14: arch/arm64/kernel/stacktrace.c
/*
* Stack tracing support
*
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *      sub     sp, sp, #0x10
 *      stp     x29, x30, [sp]
 *      mov     x29, sp
 *
 * A simple function epilogue looks like this:
 *      mov     sp, x29
 *      ldp     x29, x30, [sp]
 *      add     sp, sp, #0x10
 */
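The prologue above leaves x29 pointing at a two-word frame record on the stack, and unwind_frame() below depends on that layout when it dereferences fp and fp + 8. The sketch that follows is illustrative only and is not part of this file; frame_record is a hypothetical name for that pair.

struct frame_record {                   /* hypothetical name, for illustration only */
        unsigned long fp;               /* caller's x29: the previous frame record */
        unsigned long lr;               /* saved x30: return address into the caller */
};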
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
        unsigned long fp = frame->fp;

        if (fp & 0xf)
                return -EINVAL;

        if (!tsk)
                tsk = current;

        if (!on_accessible_stack(tsk, fp))
                return -EINVAL;

        frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
        frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
            (frame->pc == (unsigned long)return_to_handler)) {
                /*
                 * This is a case where the function graph tracer has
                 * modified a return address (LR) in a stack frame
                 * to hook a function return.
                 * So replace it with the original value.
                 */
                frame->pc = tsk->ret_stack[frame->graph--].ret;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

        /*
         * Frames created upon entry from EL0 have NULL FP and PC values, so
         * don't bother reporting these. Frames created by __noreturn functions
         * might have a valid FP even if PC is bogus, so only terminate where
         * both are NULL.
         */
        if (!frame->fp && !frame->pc)
                return -EINVAL;

        return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Catalin Marinas | 59 | 37.82% | 1 | 7.69% |
AKASHI Takahiro | 51 | 32.69% | 4 | 30.77% |
Ard Biesheuvel | 18 | 11.54% | 2 | 15.38% |
Mark Rutland | 10 | 6.41% | 2 | 15.38% |
James Morse | 10 | 6.41% | 2 | 15.38% |
Yang Shi | 8 | 5.13% | 2 | 15.38% |
Total | 156 | 100.00% | 13 | 100.00% |
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                             int (*fn)(struct stackframe *, void *), void *data)
{
        while (1) {
                int ret;

                if (fn(frame, data))
                        break;
                ret = unwind_frame(tsk, frame);
                if (ret < 0)
                        break;
        }
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Catalin Marinas | 62 | 89.86% | 1 | 50.00% |
AKASHI Takahiro | 7 | 10.14% | 1 | 50.00% |
Total | 69 | 100.00% | 2 | 100.00% |
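As a usage sketch, not part of this file: an in-kernel caller can seed a struct stackframe for the current task and let walk_stackframe() drive a callback until unwind_frame() reports an error, much as dump_backtrace() in arch/arm64/kernel/traps.c does. The callback and helper names below are illustrative, and frame.graph exists only when CONFIG_FUNCTION_GRAPH_TRACER is enabled.

static int print_entry(struct stackframe *frame, void *data)   /* illustrative */
{
        pr_info(" pc: %pS\n", (void *)frame->pc);
        return 0;                       /* returning non-zero would stop the walk */
}

static void dump_current_stack(void)    /* illustrative helper */
{
        struct stackframe frame;

        frame.fp = (unsigned long)__builtin_frame_address(0);
        frame.pc = (unsigned long)dump_current_stack;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = current->curr_ret_stack;
#endif
        walk_stackframe(current, &frame, print_entry, NULL);
}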
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
        struct stack_trace *trace;
        unsigned int no_sched_functions;
        unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
        struct stack_trace_data *data = d;
        struct stack_trace *trace = data->trace;
        unsigned long addr = frame->pc;

        if (data->no_sched_functions && in_sched_functions(addr))
                return 0;
        if (data->skip) {
                data->skip--;
                return 0;
        }

        trace->entries[trace->nr_entries++] = addr;

        return trace->nr_entries >= trace->max_entries;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Catalin Marinas | 90 | 100.00% | 1 | 100.00% |
Total | 90 | 100.00% | 1 | 100.00% |
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;
        data.no_sched_functions = 0;

        frame.fp = regs->regs[29];
        frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = current->curr_ret_stack;
#endif

        walk_stackframe(current, &frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Pratyush Anand | 110 | 100.00% | 1 | 100.00% |
Total | 110 | 100.00% | 1 | 100.00% |
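A hedged usage sketch, not part of this file: a caller that already holds a struct pt_regs, for example a kprobe or fault handler, can capture the interrupted context with save_stack_trace_regs(). The entry count and helper name below are illustrative; print_stack_trace() is the generic helper declared in <linux/stacktrace.h>.

static void trace_from_regs(struct pt_regs *regs)       /* illustrative helper */
{
        unsigned long entries[16];
        struct stack_trace trace = {
                .entries        = entries,
                .max_entries    = ARRAY_SIZE(entries),
                .nr_entries     = 0,
                .skip           = 0,
        };

        save_stack_trace_regs(regs, &trace);
        print_stack_trace(&trace, 0);
}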
static noinline void __save_stack_trace(struct task_struct *tsk,
        struct stack_trace *trace, unsigned int nosched)
{
        struct stack_trace_data data;
        struct stackframe frame;

        if (!try_get_task_stack(tsk))
                return;

        data.trace = trace;
        data.skip = trace->skip;
        data.no_sched_functions = nosched;

        if (tsk != current) {
                frame.fp = thread_saved_fp(tsk);
                frame.pc = thread_saved_pc(tsk);
        } else {
                /* We don't want this function nor the caller */
                data.skip += 2;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.pc = (unsigned long)__save_stack_trace;
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = tsk->curr_ret_stack;
#endif

        walk_stackframe(tsk, &frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;

        put_task_stack(tsk);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Catalin Marinas | 123 | 72.35% | 1 | 20.00% |
Prakash Gupta | 18 | 10.59% | 1 | 20.00% |
AKASHI Takahiro | 15 | 8.82% | 2 | 40.00% |
Mark Rutland | 14 | 8.24% | 1 | 20.00% |
Total | 170 | 100.00% | 5 | 100.00% |
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        __save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
Contributors
Person | Tokens | Token % | Commits | Commit % |
Prakash Gupta | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
void save_stack_trace(struct stack_trace *trace)
{
        __save_stack_trace(current, trace, 0);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Catalin Marinas | 16 | 84.21% | 1 | 50.00% |
Prakash Gupta | 3 | 15.79% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
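As a closing usage sketch, not part of this file: generic code reaches these routines through the arch-independent stacktrace API, passing a caller-owned entries array and, for save_stack_trace_tsk(), a task that is typically blocked. The helper name and array size below are illustrative; the skip field drops the innermost frames, as save_trace() above implements.

static void capture_traces(struct task_struct *tsk)     /* illustrative helper */
{
        unsigned long entries[32];
        struct stack_trace trace = {
                .entries        = entries,
                .max_entries    = ARRAY_SIZE(entries),
                .skip           = 1,    /* also drop capture_traces() itself */
        };

        save_stack_trace(&trace);               /* trace the current task */
        print_stack_trace(&trace, 0);

        trace.nr_entries = 0;
        trace.skip = 0;
        save_stack_trace_tsk(tsk, &trace);      /* trace another, typically blocked, task */
        print_stack_trace(&trace, 0);
}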
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Catalin Marinas | 395 | 56.19% | 1 | 5.00% |
Pratyush Anand | 110 | 15.65% | 1 | 5.00% |
AKASHI Takahiro | 79 | 11.24% | 4 | 20.00% |
Prakash Gupta | 45 | 6.40% | 1 | 5.00% |
Mark Rutland | 27 | 3.84% | 4 | 20.00% |
Ard Biesheuvel | 18 | 2.56% | 2 | 10.00% |
James Morse | 10 | 1.42% | 2 | 10.00% |
Yang Shi | 8 | 1.14% | 2 | 10.00% |
Ingo Molnar | 6 | 0.85% | 2 | 10.00% |
Dustin Brown | 5 | 0.71% | 1 | 5.00% |
Total | 703 | 100.00% | 20 | 100.00% |