cregit-Linux how code gets into the kernel

Release 4.13 kernel/extable.c

Directory: kernel
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * some users need to sleep (allocating memory...) while they hold this lock.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */

DEFINE_MUTEX(text_mutex);

/* Bounds of the built-in exception table, provided by the linker script. */
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */

/* __initdata: only consulted once during boot by sort_main_extable(). */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */

void __init sort_main_extable(void) { if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) { pr_notice("Sorting __ex_table...\n"); sort_extable(__start___ex_table, __stop___ex_table); } }

Contributors

Person            Tokens  Prop     Commits  CommitProp
Paul Mackerras        15  50.00%         1  25.00%
Borislav Petkov        7  23.33%         1  25.00%
Uwe Kleine-König       4  13.33%         1  25.00%
David Daney            4  13.33%         1  25.00%
Total                 30  100.00%        4  100.00%

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr) { const struct exception_table_entry *e; e = search_extable(__start___ex_table, __stop___ex_table - __start___ex_table, addr); if (!e) e = search_module_extables(addr); return e; }

Contributors

Person            Tokens  Prop     Commits  CommitProp
Rusty Russell         35  76.09%         2  50.00%
Linus Torvalds        10  21.74%         1  25.00%
Thomas Meyer           1   2.17%         1  25.00%
Total                 46  100.00%        4  100.00%


static inline int init_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_sinittext && addr < (unsigned long)_einittext) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell2571.43%133.33%
Ingo Molnar925.71%133.33%
Helge Deller12.86%133.33%
Total35100.00%3100.00%


int notrace core_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_stext && addr < (unsigned long)_etext) return 1; if (system_state < SYSTEM_RUNNING && init_kernel_text(addr)) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Ingo Molnar2245.83%114.29%
Andrew Morton1837.50%114.29%
Rusty Russell48.33%228.57%
Thomas Gleixner24.17%114.29%
Marcin Nowakowski12.08%114.29%
Helge Deller12.08%114.29%
Total48100.00%7100.00%

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: On some archs it may return true for core RODATA, and false
 * for others. But will always be true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
	return addr >= (unsigned long)_sdata &&
	       addr < (unsigned long)_edata;
}

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt33100.00%2100.00%
Total33100.00%2100.00%


int __kernel_text_address(unsigned long addr) { if (core_kernel_text(addr)) return 1; if (is_module_text_address(addr)) return 1; if (is_ftrace_trampoline(addr)) return 1; if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr)) return 1; if (is_bpf_text_address(addr)) return 1; /* * There might be init symbols in saved stacktraces. * Give those symbols a chance to be printed in * backtraces (such as lockdep traces). * * Since we are after the module-symbols check, there's * no danger of address overlap: */ if (init_kernel_text(addr)) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell2430.77%228.57%
Ingo Molnar1924.36%114.29%
Masami Hiramatsu1519.23%114.29%
Steven Rostedt1012.82%114.29%
Daniel Borkmann810.26%114.29%
Linus Torvalds22.56%114.29%
Total78100.00%7100.00%


int kernel_text_address(unsigned long addr) { if (core_kernel_text(addr)) return 1; if (is_module_text_address(addr)) return 1; if (is_ftrace_trampoline(addr)) return 1; if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr)) return 1; if (is_bpf_text_address(addr)) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell2435.82%342.86%
Masami Hiramatsu2334.33%114.29%
Steven Rostedt1014.93%114.29%
Linus Torvalds57.46%114.29%
Daniel Borkmann57.46%114.29%
Total67100.00%7100.00%

/*
 * On some architectures (PPC64, IA64) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr = (unsigned long)dereference_function_descriptor(ptr);

	if (core_kernel_text(addr))
		return 1;

	return is_module_text_address(addr);
}

Contributors

PersonTokensPropCommitsCommitProp
Arjan van de Ven3997.50%150.00%
Rusty Russell12.50%150.00%
Total40100.00%2100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Rusty Russell11927.17%722.58%
Ingo Molnar6214.16%26.45%
Steven Rostedt5412.33%39.68%
Masami Hiramatsu419.36%13.23%
Arjan van de Ven409.13%13.23%
Linus Torvalds337.53%39.68%
Paul Mackerras184.11%13.23%
Andrew Morton184.11%13.23%
Daniel Borkmann163.65%13.23%
David Daney112.51%13.23%
Borislav Petkov71.60%13.23%
Uwe Kleine-König40.91%13.23%
Frédéric Weisbecker40.91%13.23%
Paul Gortmaker30.68%13.23%
Helge Deller20.46%13.23%
Thomas Gleixner20.46%13.23%
Marcin Nowakowski10.23%13.23%
Dmitri Vorobiev10.23%13.23%
Andi Kleen10.23%13.23%
Thomas Meyer10.23%13.23%
Total438100.00%31100.00%
Directory: kernel
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.