Release 4.8 arch/x86/kernel/dumpstack_32.c
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <asm/stacktrace.h>
static void *is_irq_stack(void *p, void *irq)
{
        if (p < irq || p >= (irq + THREAD_SIZE))
                return NULL;
        return irq + THREAD_SIZE;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| steven rostedt | 37 | 100.00% | 1 | 100.00% |
| Total | 37 | 100.00% | 1 | 100.00% |
static void *is_hardirq_stack(unsigned long *stack, int cpu)
{
        void *irq = per_cpu(hardirq_stack, cpu);

        return is_irq_stack(stack, irq);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| steven rostedt | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
static void *is_softirq_stack(unsigned long *stack, int cpu)
{
        void *irq = per_cpu(softirq_stack, cpu);

        return is_irq_stack(stack, irq);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| steven rostedt | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
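All three helpers reduce to one membership test: a stack pointer belongs to an irq stack only if it falls inside [irq, irq + THREAD_SIZE), and on a hit the caller gets back the end of that region, which dump_trace() later hands to the frame walker as its stop address. Below is a minimal userspace sketch of the same check; THREAD_SIZE, the malloc'd region, and the char * casts are stand-ins for this illustration only.

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192        /* stand-in for the kernel's THREAD_SIZE */

/* Same test as is_irq_stack(): NULL when p lies outside [irq, irq + THREAD_SIZE),
 * otherwise the end of the region, used as the walker's stop address. */
static void *is_irq_stack(void *p, void *irq)
{
        if (p < irq || p >= (void *)((char *)irq + THREAD_SIZE))
                return NULL;
        return (char *)irq + THREAD_SIZE;
}

int main(void)
{
        char *irq = malloc(THREAD_SIZE);        /* stand-in irq stack */

        printf("inside : %p\n", is_irq_stack(irq + 100, irq));         /* irq + THREAD_SIZE */
        printf("outside: %p\n", is_irq_stack(irq + THREAD_SIZE, irq)); /* (nil) */
        free(irq);
        return 0;
}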
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
{
        const unsigned cpu = get_cpu();
        int graph = 0;
        u32 *prev_esp;

        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;

                stack = &dummy;
                if (task != current)
                        stack = (unsigned long *)task->thread.sp;
        }

        if (!bp)
                bp = stack_frame(task, regs);

        for (;;) {
                void *end_stack;

                end_stack = is_hardirq_stack(stack, cpu);
                if (!end_stack)
                        end_stack = is_softirq_stack(stack, cpu);

                bp = ops->walk_stack(task, stack, bp, ops, data,
                                     end_stack, &graph);

                /* Stop if not on irq stack */
                if (!end_stack)
                        break;

                /* The previous esp is saved on the bottom of the stack */
                prev_esp = (u32 *)(end_stack - THREAD_SIZE);
                stack = (unsigned long *)*prev_esp;
                if (!stack)
                        break;

                if (ops->stack(data, "IRQ") < 0)
                        break;
                touch_nmi_watchdog();
        }
        put_cpu();
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| alexander van heukelum | 132 | 60.00% | 3 | 30.00% |
| steven rostedt | 70 | 31.82% | 3 | 30.00% |
| namhyung kim | 9 | 4.09% | 1 | 10.00% |
| soeren sandmann pedersen* | 5 | 2.27% | 1 | 10.00% |
| frederic weisbecker | 3 | 1.36% | 1 | 10.00% |
| linus torvalds | 1 | 0.45% | 1 | 10.00% |
| Total | 220 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(dump_trace);
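Because dump_trace() is exported, any stacktrace_ops consumer can drive the walk. The sketch below is a hypothetical in-tree caller: it assumes the 4.8-era stacktrace_ops layout and the stock print_context_stack frame walker, both declared in asm/stacktrace.h rather than in this file, and all sketch_* names are made up for the illustration.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

/* ->address: invoked for every return address the walker finds. */
static int sketch_address(void *data, unsigned long addr, int reliable)
{
        pr_info("%s[<%08lx>] %pS\n", reliable ? " " : "? ", addr, (void *)addr);
        return 0;
}

/* ->stack: invoked when the walk crosses onto a named stack (the "IRQ"
 * string passed above); a negative return stops the dump. */
static int sketch_stack(void *data, char *name)
{
        pr_info(" <%s>\n", name);
        return 0;
}

static const struct stacktrace_ops sketch_ops = {
        .address        = sketch_address,
        .stack          = sketch_stack,
        .walk_stack     = print_context_stack,  /* stock frame-pointer walker */
};

static void sketch_dump_current(void)
{
        /* NULL task/regs/stack: dump_trace() falls back to the current task. */
        dump_trace(NULL, NULL, NULL, 0, &sketch_ops, NULL);
}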
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long *sp, unsigned long bp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (sp == NULL) {
                if (regs)
                        sp = (unsigned long *)regs->sp;
                else if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if ((i % STACKSLOTS_PER_LINE) == 0) {
                        if (i != 0)
                                pr_cont("\n");
                        printk("%s %08lx", log_lvl, *stack++);
                } else
                        pr_cont(" %08lx", *stack++);
                touch_nmi_watchdog();
        }
        pr_cont("\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| alexander van heukelum | 127 | 70.56% | 3 | 33.33% |
| adrien schildknecht | 20 | 11.11% | 1 | 11.11% |
| andy lutomirski | 16 | 8.89% | 1 | 11.11% |
| neil horman | 7 | 3.89% | 1 | 11.11% |
| namhyung kim | 6 | 3.33% | 1 | 11.11% |
| joe perches | 3 | 1.67% | 1 | 11.11% |
| jiri slaby | 1 | 0.56% | 1 | 11.11% |
| Total | 180 | 100.00% | 9 | 100.00% |
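The dump loop prints at most kstack_depth_to_print words, starting a fresh printk() line, prefixed with the caller's log level, every STACKSLOTS_PER_LINE entries; on 32-bit x86 that constant is 8 (an assumption mirrored from asm/stacktrace.h, not defined in this file). A userspace sketch of the same row splitting, with printf() standing in for printk()/pr_cont():

#include <stdio.h>

#define STACKSLOTS_PER_LINE 8   /* 32-bit value; assumption from asm/stacktrace.h */

static void show_words(const unsigned long *stack, int depth, const char *log_lvl)
{
        int i;

        for (i = 0; i < depth; i++) {
                if ((i % STACKSLOTS_PER_LINE) == 0) {
                        if (i != 0)
                                printf("\n");
                        printf("%s %08lx", log_lvl, stack[i]); /* new row: level prefix first */
                } else {
                        printf(" %08lx", stack[i]);            /* continuation of the row */
                }
        }
        printf("\n");
}

int main(void)
{
        unsigned long words[20];
        int i;

        for (i = 0; i < 20; i++)
                words[i] = 0x1000 + i;
        show_words(words, 20, "<0>");   /* three rows: 8 + 8 + 4 words */
        return 0;
}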
void show_regs(struct pt_regs *regs)
{
        int i;

        show_regs_print_info(KERN_EMERG);
        __show_regs(regs, !user_mode(regs));

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (!user_mode(regs)) {
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;
                u8 *ip;

                pr_emerg("Stack:\n");
                show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

                pr_emerg("Code:");

                ip = (u8 *)regs->ip - code_prologue;
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at IP */
                        ip = (u8 *)regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                            probe_kernel_address(ip, c)) {
                                pr_cont(" Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                pr_cont(" <%02x>", c);
                        else
                                pr_cont(" %02x", c);
                }
        }
        pr_cont("\n");
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| alexander van heukelum | 182 | 84.65% | 3 | 30.00% |
| joe perches | 10 | 4.65% | 1 | 10.00% |
| neil horman | 9 | 4.19% | 1 | 10.00% |
| tejun heo | 5 | 2.33% | 1 | 10.00% |
| jan beulich | 5 | 2.33% | 2 | 20.00% |
| andy lutomirski | 2 | 0.93% | 1 | 10.00% |
| namhyung kim | 2 | 0.93% | 1 | 10.00% |
| Total | 215 | 100.00% | 10 | 100.00% |
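The prologue arithmetic deserves a worked example: code_bytes defaults to 64 (tunable via the code_bytes= boot parameter), so code_prologue = 64 * 43 / 64 = 43, meaning the dump tries to show 43 bytes before the faulting EIP and the remaining 21 bytes from EIP onward. A quick check of that split:

#include <stdio.h>

int main(void)
{
        unsigned int code_bytes = 64;                       /* kernel default */
        unsigned int code_prologue = code_bytes * 43 / 64;  /* integer division */

        printf("before EIP : %u bytes\n", code_prologue);               /* 43 */
        printf("from EIP on: %u bytes\n", code_bytes - code_prologue);  /* 21 */
        return 0;
}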
int is_valid_bugaddr(unsigned long ip)
{
        unsigned short ud2;

        if (ip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)ip, ud2))
                return 0;

        return ud2 == 0x0b0f;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| alexander van heukelum | 44 | 100.00% | 1 | 100.00% |
| Total | 44 | 100.00% | 1 | 100.00% |
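The magic constant is a byte-order artifact: UD2 assembles to the byte sequence 0F 0B, and fetching those two bytes as a little-endian 16-bit load yields 0x0b0f. A userspace illustration of the same fetch:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char insn[2] = { 0x0f, 0x0b }; /* UD2 opcode bytes */
        unsigned short ud2;

        memcpy(&ud2, insn, sizeof(ud2));        /* mimics the 16-bit fetch */
        printf("0x%04x\n", ud2);                /* 0x0b0f on little-endian x86 */
        return 0;
}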
Overall Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| alexander van heukelum | 521 | 64.64% | 5 | 20.83% |
| steven rostedt | 175 | 21.71% | 3 | 12.50% |
| adrien schildknecht | 20 | 2.48% | 1 | 4.17% |
| andy lutomirski | 18 | 2.23% | 2 | 8.33% |
| namhyung kim | 17 | 2.11% | 1 | 4.17% |
| neil horman | 16 | 1.99% | 1 | 4.17% |
| joe perches | 13 | 1.61% | 1 | 4.17% |
| jan beulich | 5 | 0.62% | 2 | 8.33% |
| soeren sandmann pedersen* | 5 | 0.62% | 1 | 4.17% |
| tejun heo | 5 | 0.62% | 1 | 4.17% |
| frederic weisbecker | 3 | 0.37% | 1 | 4.17% |
| ingo molnar | 3 | 0.37% | 1 | 4.17% |
| andrew morton | 2 | 0.25% | 1 | 4.17% |
| jiri slaby | 1 | 0.12% | 1 | 4.17% |
| paul gortmaker | 1 | 0.12% | 1 | 4.17% |
| linus torvalds | 1 | 0.12% | 1 | 4.17% |
| Total | 806 | 100.00% | 24 | 100.00% |