cregit-Linux how code gets into the kernel

Release 4.11 arch/x86/kernel/irq_32.c

Directory: arch/x86/kernel
/*
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW


/*
 * When non-zero, print_stack_overflow() escalates a detected low-stack
 * condition from a warning to a panic().  Presumably exposed as a sysctl
 * knob (name suggests kernel.panic_on_stackoverflow) -- declared here,
 * wired up elsewhere.
 */
int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */

/*
 * Return non-zero if the current kernel stack is nearly exhausted.
 *
 * Kernel stacks are THREAD_SIZE-aligned, so ANDing %esp with
 * THREAD_SIZE - 1 yields the stack pointer's offset from the base of the
 * stack.  struct thread_info sits at that base; if the offset has dropped
 * below thread_info + STACK_WARN, less than STACK_WARN bytes remain free.
 */
static int check_stack_overflow(void) { long sp; __asm__ __volatile__("andl %%esp,%0" : "=r" (sp) : "0" (THREAD_SIZE - 1)); return sp < (sizeof(struct thread_info) + STACK_WARN); }

Contributors

Person            Tokens  Prop     Commits  CommitProp
Thomas Gleixner   27      100.00%  1        100.00%
Total             27      100.00%  1        100.00%


/*
 * Report a detected low-stack condition: warn with a backtrace, and
 * escalate to panic() when the sysctl_panic_on_stackoverflow knob is set.
 */
static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();

	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner1765.38%150.00%
Mitsuo Hayasaka934.62%150.00%
Total26100.00%2100.00%

#else
/* Stack-overflow checking compiled out: always report "no overflow". */
static inline int check_stack_overflow(void)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner12100.00%1100.00%
Total12100.00%1100.00%


/* No-op counterpart when CONFIG_DEBUG_STACKOVERFLOW is disabled. */
static inline void print_stack_overflow(void)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner8100.00%1100.00%
Total8100.00%1100.00%

#endif DEFINE_PER_CPU(struct irq_stack *, hardirq_stack); DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
/*
 * Call func() with the stack pointer switched to 'stack'.
 *
 * xchgl swaps %esp with the new stack top, parking the old %esp in %ebx
 * (callee-saved, so func preserves it); after "call *%%edi" returns, the
 * original %esp is restored from %ebx.  The caller-saved registers
 * eax/ecx/edx, memory and flags are declared clobbered.
 */
static void call_on_stack(void *func, void *stack) { asm volatile("xchgl %%ebx,%%esp \n" "call *%%edi \n" "movl %%ebx,%%esp \n" : "=b" (stack) : "0" (stack), "D"(func) : "memory", "cc", "edx", "ecx", "eax"); }

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen1164.71%114.29%
Linus Torvalds211.76%228.57%
Thomas Gleixner211.76%228.57%
Linus Torvalds (pre-git)15.88%114.29%
Ingo Molnar15.88%114.29%
Total17100.00%7100.00%


/*
 * Return the base address of the currently active stack by masking off
 * the low bits of the stack pointer (stacks are THREAD_SIZE-aligned).
 */
static inline void *current_stack(void)
{
	unsigned long sp = current_stack_pointer();

	return (void *)(sp & ~(THREAD_SIZE - 1));
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt2696.30%150.00%
Andrew Lutomirski13.70%150.00%
Total27100.00%2100.00%


/*
 * Run desc->handle_irq(desc) on this CPU's dedicated hardirq stack.
 *
 * Returns 0 (without handling the irq) if we are already running on the
 * hardirq stack -- i.e. we interrupted a hardirq handler -- in which case
 * the caller handles the irq on the current stack instead.  Otherwise the
 * interrupted stack pointer is saved at the base of the irq stack (so
 * unwinders can walk back across the switch), the low-stack warning is
 * printed on the irq stack if requested, and the inline asm performs the
 * same %esp switch/call/restore dance as call_on_stack(), passing desc in
 * %eax per regparm convention.  Returns 1 on success.
 */
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) { struct irq_stack *curstk, *irqstk; u32 *isp, *prev_esp, arg1; curstk = (struct irq_stack *) current_stack(); irqstk = __this_cpu_read(hardirq_stack); /* * this is where we switch to the IRQ stack. However, if we are * already using the IRQ stack (because we interrupted a hardirq * handler) we can't do that and just have to keep using the * current stack (which is the irq stack already after all) */ if (unlikely(curstk == irqstk)) return 0; isp = (u32 *) ((char *)irqstk + sizeof(*irqstk)); /* Save the next esp at the bottom of the stack */ prev_esp = (u32 *)irqstk; *prev_esp = current_stack_pointer(); if (unlikely(overflow)) call_on_stack(print_stack_overflow, isp); asm volatile("xchgl %%ebx,%%esp \n" "call *%%edi \n" "movl %%ebx,%%esp \n" : "=a" (arg1), "=b" (isp) : "0" (desc), "1" (isp), "D" (desc->handle_irq) : "memory", "cc", "ecx"); return 1; }

Contributors

PersonTokensPropCommitsCommitProp
Ingo Molnar3630.77%29.09%
Steven Rostedt2823.93%29.09%
Thomas Gleixner2117.95%313.64%
Linus Torvalds (pre-git)1815.38%1045.45%
Andi Kleen75.98%14.55%
David Howells32.56%14.55%
Lai Jiangshan21.71%14.55%
Andrew Lutomirski10.85%14.55%
Tejun Heo10.85%14.55%
Total117100.00%22100.00%

/* Allocate per-CPU stacks for hardirq and for softirq processing. */
void irq_ctx_init(int cpu) { struct irq_stack *irqstk; if (per_cpu(hardirq_stack, cpu)) return; irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); per_cpu(hardirq_stack, cpu) = irqstk; irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); per_cpu(softirq_stack, cpu) = irqstk; printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); }

Contributors

PersonTokensPropCommitsCommitProp
Lai Jiangshan2626.80%17.69%
Eric Dumazet1818.56%17.69%
Ingo Molnar1818.56%17.69%
Linus Torvalds (pre-git)1515.46%646.15%
Steven Rostedt1212.37%17.69%
Thomas Gleixner55.15%215.38%
Andrew Morton33.09%17.69%
Total97100.00%13100.00%


/* Run __do_softirq() on this CPU's dedicated softirq stack. */
void do_softirq_own_stack(void)
{
	struct irq_stack *stk = __this_cpu_read(softirq_stack);
	u32 *top, *prev;

	/* The call frame starts at the high end of the softirq stack. */
	top = (u32 *) ((char *)stk + sizeof(*stk));

	/* Stash the interrupted stack pointer at the stack's base. */
	prev = (u32 *)stk;
	*prev = current_stack_pointer();

	call_on_stack(__do_softirq, top);
}

Contributors

PersonTokensPropCommitsCommitProp
Ingo Molnar2434.78%18.33%
Steven Rostedt2333.33%216.67%
Thomas Gleixner710.14%18.33%
Linus Torvalds45.80%18.33%
Andrew Morton34.35%216.67%
Linus Torvalds (pre-git)34.35%18.33%
Lai Jiangshan22.90%18.33%
Tejun Heo11.45%18.33%
Andrew Lutomirski11.45%18.33%
Frédéric Weisbecker11.45%18.33%
Total69100.00%12100.00%


/*
 * Dispatch an interrupt: returns false for a bad descriptor, true after
 * the irq was handled -- on the dedicated hardirq stack when we came from
 * kernel mode and were not already on it, on the current stack otherwise.
 */
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int low_stack = check_stack_overflow();

	if (IS_ERR_OR_NULL(desc))
		return false;

	/* Kernel-mode entry normally runs the handler on the irq stack. */
	if (!user_mode(regs) && execute_on_irq_stack(low_stack, desc))
		return true;

	if (unlikely(low_stack))
		print_stack_overflow();
	generic_handle_irq_desc(desc);

	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Jeremy Fitzhardinge5380.30%120.00%
Thomas Gleixner812.12%240.00%
Linus Torvalds46.06%120.00%
Andrew Lutomirski11.52%120.00%
Total66100.00%5100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner11621.97%612.24%
Steven Rostedt9517.99%24.08%
Ingo Molnar8516.10%36.12%
Jeremy Fitzhardinge5310.04%12.04%
Linus Torvalds (pre-git)458.52%1632.65%
Lai Jiangshan448.33%12.04%
Eric Dumazet213.98%12.04%
Andi Kleen183.41%12.04%
Linus Torvalds101.89%48.16%
Zwane Mwaikambo91.70%12.04%
Mitsuo Hayasaka91.70%12.04%
Andrew Morton81.52%48.16%
Andrew Lutomirski40.76%24.08%
David Howells30.57%12.04%
Stéphane Eranian20.38%12.04%
Jaswinder Singh Rajput20.38%12.04%
Tejun Heo20.38%12.04%
Dave Jones10.19%12.04%
Frédéric Weisbecker10.19%12.04%
Total528100.00%49100.00%
Directory: arch/x86/kernel
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.