cregit-Linux how code gets into the kernel

Release 4.14 arch/x86/kernel/irq_32.c

Directory: arch/x86/kernel
// SPDX-License-Identifier: GPL-2.0
/*
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW


/* Nonzero => panic instead of just warning when a low-stack condition
 * is detected (consumed in print_stack_overflow()). */
int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */

/*
 * Return nonzero when the current kernel stack is nearly exhausted.
 *
 * The asm masks %esp with (THREAD_SIZE - 1), yielding the stack
 * pointer's offset within the THREAD_SIZE-aligned stack area. If that
 * offset has dropped below sizeof(struct thread_info) + STACK_WARN,
 * less than the warning margin remains free.
 */
static int check_stack_overflow(void) { long sp; __asm__ __volatile__("andl %%esp,%0" : "=r" (sp) : "0" (THREAD_SIZE - 1)); return sp < (sizeof(struct thread_info) + STACK_WARN); }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner27100.00%1100.00%
Total27100.00%1100.00%


/*
 * Report a detected low-stack condition: log a warning with a stack
 * dump, and escalate to a panic when the sysctl flag is set.
 */
static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();

	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner1765.38%150.00%
Mitsuo Hayasaka934.62%150.00%
Total26100.00%2100.00%

#else
/* Stack-overflow checking compiled out: always report "no overflow". */
static inline int check_stack_overflow(void)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner12100.00%1100.00%
Total12100.00%1100.00%


/* No-op stub used when CONFIG_DEBUG_STACKOVERFLOW is disabled. */
static inline void print_stack_overflow(void)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner8100.00%1100.00%
Total8100.00%1100.00%

#endif DEFINE_PER_CPU(struct irq_stack *, hardirq_stack); DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
/*
 * Call func with %esp switched to 'stack', then restore the original
 * stack pointer.
 *
 * xchgl parks the old %esp in %ebx (callee-saved) across the indirect
 * call through %edi, and the final movl restores it. eax/ecx/edx,
 * flags and memory are declared clobbered. Both users in this file
 * pass functions taking no arguments (print_stack_overflow,
 * __do_softirq); nothing is set up for argument passing here.
 */
static void call_on_stack(void *func, void *stack) { asm volatile("xchgl %%ebx,%%esp \n" "call *%%edi \n" "movl %%ebx,%%esp \n" : "=b" (stack) : "0" (stack), "D"(func) : "memory", "cc", "edx", "ecx", "eax"); }

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen1164.71%114.29%
Thomas Gleixner211.76%228.57%
Linus Torvalds211.76%228.57%
Linus Torvalds (pre-git)15.88%114.29%
Ingo Molnar15.88%114.29%
Total17100.00%7100.00%


/*
 * Return the base of the THREAD_SIZE-aligned stack area the CPU is
 * currently executing on, derived by masking the stack pointer.
 */
static inline void *current_stack(void)
{
	unsigned long sp = current_stack_pointer;

	return (void *)(sp & ~(THREAD_SIZE - 1));
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt26100.00%1100.00%
Total26100.00%1100.00%


/*
 * Run desc->handle_irq(desc) on this CPU's hardirq stack.
 *
 * Returns 0 when we are already executing on the hardirq stack (a
 * nested hardirq) — the caller must then run the handler on the
 * current stack instead. Returns 1 after the handler has run.
 *
 * The interrupted stack pointer is saved at the base of the irq stack
 * before switching. If 'overflow' is set, print_stack_overflow() is
 * invoked on the irq stack first. The asm mirrors call_on_stack():
 * %esp is swapped into %ebx around the indirect call through %edi,
 * with desc passed in %eax ("0" (desc) tied to "=a").
 */
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) { struct irq_stack *curstk, *irqstk; u32 *isp, *prev_esp, arg1; curstk = (struct irq_stack *) current_stack(); irqstk = __this_cpu_read(hardirq_stack); /* * this is where we switch to the IRQ stack. However, if we are * already using the IRQ stack (because we interrupted a hardirq * handler) we can't do that and just have to keep using the * current stack (which is the irq stack already after all) */ if (unlikely(curstk == irqstk)) return 0; isp = (u32 *) ((char *)irqstk + sizeof(*irqstk)); /* Save the next esp at the bottom of the stack */ prev_esp = (u32 *)irqstk; *prev_esp = current_stack_pointer; if (unlikely(overflow)) call_on_stack(print_stack_overflow, isp); asm volatile("xchgl %%ebx,%%esp \n" "call *%%edi \n" "movl %%ebx,%%esp \n" : "=a" (arg1), "=b" (isp) : "0" (desc), "1" (isp), "D" (desc->handle_irq) : "memory", "cc", "ecx"); return 1; }

Contributors

PersonTokensPropCommitsCommitProp
Ingo Molnar5143.97%216.67%
Steven Rostedt2824.14%216.67%
Thomas Gleixner2118.10%325.00%
Andi Kleen76.03%18.33%
David Howells32.59%18.33%
Linus Torvalds (pre-git)32.59%18.33%
Lai Jiangshan21.72%18.33%
Tejun Heo10.86%18.33%
Total116100.00%12100.00%

/*
 * Allocate per-CPU stacks for hardirq and for softirq processing.
 */
void irq_ctx_init(int cpu) { struct irq_stack *irqstk; if (per_cpu(hardirq_stack, cpu)) return; irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); per_cpu(hardirq_stack, cpu) = irqstk; irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); per_cpu(softirq_stack, cpu) = irqstk; printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); }

Contributors

PersonTokensPropCommitsCommitProp
Lai Jiangshan2626.80%111.11%
Ingo Molnar2121.65%111.11%
Eric Dumazet1818.56%111.11%
Linus Torvalds (pre-git)1414.43%222.22%
Steven Rostedt1212.37%111.11%
Thomas Gleixner55.15%222.22%
Linus Torvalds11.03%111.11%
Total97100.00%9100.00%


/* Run __do_softirq() on this CPU's dedicated softirq stack. */
void do_softirq_own_stack(void)
{
	struct irq_stack *stk = __this_cpu_read(softirq_stack);
	u32 *stack_top, *saved_sp;

	/* The stack grows down: start from the high end of the area. */
	stack_top = (u32 *) ((char *)stk + sizeof(*stk));

	/* Save the previous esp at the base of the softirq stack. */
	saved_sp = (u32 *)stk;
	*saved_sp = current_stack_pointer;

	call_on_stack(__do_softirq, stack_top);
}

Contributors

PersonTokensPropCommitsCommitProp
Ingo Molnar2435.29%110.00%
Steven Rostedt2333.82%220.00%
Linus Torvalds (pre-git)811.76%220.00%
Thomas Gleixner710.29%110.00%
Lai Jiangshan22.94%110.00%
Linus Torvalds22.94%110.00%
Frédéric Weisbecker11.47%110.00%
Tejun Heo11.47%110.00%
Total68100.00%10100.00%


/*
 * Dispatch one hardware interrupt. Returns false for a bad (error or
 * NULL) descriptor, true once the handler has been invoked.
 */
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int low_stack = check_stack_overflow();

	if (IS_ERR_OR_NULL(desc))
		return false;

	/*
	 * For kernel-mode entry, try to run the handler on the dedicated
	 * irq stack; if we came from user mode, or the switch is not
	 * possible (already nested on the irq stack), run it right here
	 * on the current stack.
	 */
	if (!user_mode(regs) && execute_on_irq_stack(low_stack, desc))
		return true;

	if (unlikely(low_stack))
		print_stack_overflow();
	generic_handle_irq_desc(desc);

	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Jeremy Fitzhardinge5380.30%120.00%
Thomas Gleixner812.12%240.00%
Linus Torvalds46.06%120.00%
Andrew Lutomirski11.52%120.00%
Total66100.00%5100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner11521.86%615.79%
Ingo Molnar10319.58%37.89%
Steven Rostedt9518.06%25.26%
Jeremy Fitzhardinge5310.08%12.63%
Lai Jiangshan458.56%12.63%
Linus Torvalds (pre-git)346.46%718.42%
Eric Dumazet213.99%12.63%
Andi Kleen183.42%12.63%
Linus Torvalds91.71%513.16%
Mitsuo Hayasaka91.71%12.63%
Zwane Mwaikambo91.71%12.63%
David Howells30.57%12.63%
Jaswinder Singh Rajput20.38%12.63%
Andrew Morton20.38%12.63%
Stéphane Eranian20.38%12.63%
Tejun Heo20.38%12.63%
Dave Jones10.19%12.63%
Frédéric Weisbecker10.19%12.63%
Greg Kroah-Hartman10.19%12.63%
Andrew Lutomirski10.19%12.63%
Total526100.00%38100.00%
Directory: arch/x86/kernel
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.