Release 4.14 arch/sh/kernel/stacktrace.c
/*
* arch/sh/kernel/stacktrace.c
*
* Stack trace management functions
*
* Copyright (C) 2006 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
/*
 * stacktrace_ops "stack" callback: invoked when the unwinder crosses
 * into a new stack region.  Nothing needs to be recorded here for a
 * plain address trace, so just report success and keep unwinding.
 */
static int save_stack_stack(void *data, char *name)
{
	/* Neither argument is needed for simple trace saving. */
	(void)data;
	(void)name;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Fleming | 16 | 94.12% | 1 | 50.00% |
Paul Mundt | 1 | 5.88% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
/*
 * stacktrace_ops "address" callback: record one return address into
 * the stack_trace buffer, honouring the caller-requested skip count
 * and the buffer's capacity.
 */
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	/* Only record addresses the unwinder considers reliable. */
	if (!reliable)
		return;

	/* Consume the skip budget before storing anything. */
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries >= trace->max_entries)
		return;

	trace->entries[trace->nr_entries++] = addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 37 | 54.41% | 2 | 66.67% |
Matt Fleming | 31 | 45.59% | 1 | 33.33% |
Total | 68 | 100.00% | 3 | 100.00% |
/* Unwinder callbacks for saving a trace of the current context. */
static const struct stacktrace_ops save_stack_ops = {
	.stack = save_stack_stack,
	.address = save_stack_address,
};
/*
 * Save stack-backtrace addresses of the current context into a
 * stack_trace buffer, starting from the current stack pointer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long *stack = (unsigned long *)current_stack_pointer;

	unwind_stack(current, NULL, stack, &save_stack_ops, trace);

	/* Mark the end of the trace with ULONG_MAX if there is room. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 44 | 75.86% | 2 | 50.00% |
Matt Fleming | 14 | 24.14% | 2 | 50.00% |
Total | 58 | 100.00% | 4 | 100.00% |
/* GPL-only export so modules can save a trace of the current context. */
EXPORT_SYMBOL_GPL(save_stack_trace);
/*
 * Like save_stack_address(), but additionally filters out addresses
 * inside scheduler functions so a sleeping task's trace starts at
 * the point the task actually blocked rather than inside the
 * scheduler itself.
 */
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	/*
	 * No cast needed when assigning from void * in C; this also
	 * matches the style of save_stack_address() above.
	 */
	struct stack_trace *trace = data;

	if (!reliable)
		return;

	/* Scheduler internals are noise in a task backtrace. */
	if (in_sched_functions(addr))
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 44 | 54.32% | 2 | 66.67% |
Matt Fleming | 37 | 45.68% | 1 | 33.33% |
Total | 81 | 100.00% | 3 | 100.00% |
/* Unwinder callbacks for task traces that filter scheduler functions. */
static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack = save_stack_stack,
	.address = save_stack_address_nosched,
};
/*
 * Save stack-backtrace addresses of an arbitrary task into a
 * stack_trace buffer, skipping scheduler functions so the trace
 * reflects where the task blocked.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)tsk->thread.sp;

	/*
	 * Walk @tsk's stack, not current's: sp comes from
	 * tsk->thread.sp, so the task handed to the unwinder must
	 * match the stack being walked (previously this passed
	 * current, which is only correct when tsk == current).
	 */
	unwind_stack(tsk, NULL, sp, &save_stack_ops_nosched, trace);

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Fleming | 41 | 61.19% | 2 | 50.00% |
Paul Mundt | 26 | 38.81% | 2 | 50.00% |
Total | 67 | 100.00% | 4 | 100.00% |
/* GPL-only export so modules can save a trace of an arbitrary task. */
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Fleming | 187 | 51.52% | 2 | 25.00% |
Paul Mundt | 170 | 46.83% | 4 | 50.00% |
Heiko Carstens | 3 | 0.83% | 1 | 12.50% |
Ingo Molnar | 3 | 0.83% | 1 | 12.50% |
Total | 363 | 100.00% | 8 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.