Release 4.15 kernel/module.c
/*
Copyright (C) 2002 Richard Henderson
Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <uapi/linux/module.h>
#include "module-internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
/*
* Modules' sections will be aligned on page boundaries
* to ensure complete separation of code and data, but
* only when CONFIG_STRICT_MODULE_RWX=y
*/
#ifdef CONFIG_STRICT_MODULE_RWX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
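To make the effect of debug_align() concrete, here is a standalone userspace sketch (not part of module.c; it assumes a 4 KiB PAGE_SIZE and re-defines ALIGN() locally):
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
#define debug_align(X) ALIGN(X, PAGE_SIZE)

int main(void)
{
	/* Each section size is rounded up to the next page boundary. */
	printf("%lu %lu %lu\n",
	       debug_align(1UL), debug_align(4096UL), debug_align(4097UL));
	/* prints: 4096 4096 8192 */
	return 0;
}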
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
/*
* Mutex protects:
* 1) List of modules (also safely readable with preempt_disable),
* 2) module_use links,
* 3) module_addr_min/module_addr_max.
* (delete and add use RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_MODULES_TREE_LOOKUP
/*
* Use a latched RB-tree for __module_address(); this allows us to use
* RCU-sched lookups of the address from any context.
*
* This is conditional on PERF_EVENTS || TRACING because those can really hit
* __module_address() hard by doing a lot of stack unwinding; potentially from
* NMI context.
*/
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
return (unsigned long)layout->base;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 32 | 82.05% | 1 | 50.00% |
Rusty Russell | 7 | 17.95% | 1 | 50.00% |
Total | 39 | 100.00% | 2 | 100.00% |
static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
return (unsigned long)layout->size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 32 | 82.05% | 1 | 50.00% |
Rusty Russell | 7 | 17.95% | 1 | 50.00% |
Total | 39 | 100.00% | 2 | 100.00% |
static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
return __mod_tree_val(a) < __mod_tree_val(b);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
unsigned long val = (unsigned long)key;
unsigned long start, end;
start = __mod_tree_val(n);
if (val < start)
return -1;
end = start + __mod_tree_size(n);
if (val >= end)
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 70 | 100.00% | 1 | 100.00% |
Total | 70 | 100.00% | 1 | 100.00% |
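The comparison convention above is what lets latch_tree_find() treat each module layout as an address range. A minimal userspace sketch of the same three-way result (illustrative names, not kernel API):
#include <stdio.h>

/* -1: addr below [base, base+size), 0: inside the range, 1: at or past the end. */
static int range_comp(unsigned long addr, unsigned long base, unsigned long size)
{
	if (addr < base)
		return -1;
	if (addr >= base + size)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       range_comp(0x0fffUL, 0x1000UL, 0x100UL),  /* -1: below  */
	       range_comp(0x1080UL, 0x1000UL, 0x100UL),  /*  0: inside */
	       range_comp(0x1100UL, 0x1000UL, 0x100UL)); /*  1: above  */
	return 0;
}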
static const struct latch_tree_ops mod_tree_ops = {
.less = mod_tree_less,
.comp = mod_tree_comp,
};
static struct mod_tree_root {
struct latch_tree_root root;
unsigned long addr_min;
unsigned long addr_max;
} mod_tree __cacheline_aligned = {
.addr_min = -1UL,
};
#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max
static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static void __mod_tree_remove(struct mod_tree_node *node)
{
latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
/*
* These modifications (insert, remove_init and remove) are serialized by the
* module_mutex.
*/
static void mod_tree_insert(struct module *mod)
{
mod->core_layout.mtn.mod = mod;
mod->init_layout.mtn.mod = mod;
__mod_tree_insert(&mod->core_layout.mtn);
if (mod->init_layout.size)
__mod_tree_insert(&mod->init_layout.mtn);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 44 | 74.58% | 2 | 66.67% |
Rusty Russell | 15 | 25.42% | 1 | 33.33% |
Total | 59 | 100.00% | 3 | 100.00% |
static void mod_tree_remove_init(struct module *mod)
{
if (mod->init_layout.size)
__mod_tree_remove(&mod->init_layout.mtn);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 23 | 79.31% | 2 | 66.67% |
Rusty Russell | 6 | 20.69% | 1 | 33.33% |
Total | 29 | 100.00% | 3 | 100.00% |
static void mod_tree_remove(struct module *mod)
{
__mod_tree_remove(&mod->core_layout.mtn);
mod_tree_remove_init(mod);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 23 | 88.46% | 2 | 66.67% |
Rusty Russell | 3 | 11.54% | 1 | 33.33% |
Total | 26 | 100.00% | 3 | 100.00% |
static struct module *mod_find(unsigned long addr)
{
struct latch_tree_node *ltn;
ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
if (!ltn)
return NULL;
return container_of(ltn, struct mod_tree_node, node)->mod;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 57 | 100.00% | 3 | 100.00% |
Total | 57 | 100.00% | 3 | 100.00% |
#else /* MODULES_TREE_LOOKUP */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;
static void mod_tree_insert(struct module *mod) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
static void mod_tree_remove_init(struct module *mod) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
static void mod_tree_remove(struct module *mod) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
static struct module *mod_find(unsigned long addr)
{
struct module *mod;
list_for_each_entry_rcu(mod, &modules, list) {
if (within_module(addr, mod))
return mod;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
#endif /* MODULES_TREE_LOOKUP */
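Both variants of mod_find() serve __module_address(), defined later in this file; a condensed sketch of that caller, showing how the bounds and the UNFORMED check fit around the lookup:
struct module *__module_address_sketch(unsigned long addr)
{
	struct module *mod;

	/* Cheap rejection before touching the tree or the list. */
	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	module_assert_mutex_or_preempt();

	mod = mod_find(addr);
	if (mod && mod->state == MODULE_STATE_UNFORMED)
		mod = NULL;	/* not yet visible to the rest of the kernel */
	return mod;
}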
/*
* Bounds of module text, for speeding up __module_address.
* Protected by module_mutex.
*/
static void __mod_update_bounds(void *base, unsigned int size)
{
unsigned long min = (unsigned long)base;
unsigned long max = min + size;
if (min < module_addr_min)
module_addr_min = min;
if (max > module_addr_max)
module_addr_max = max;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 52 | 100.00% | 1 | 100.00% |
Total | 52 | 100.00% | 1 | 100.00% |
static void mod_update_bounds(struct module *mod)
{
__mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
if (mod->init_layout.size)
__mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 34 | 69.39% | 1 | 50.00% |
Rusty Russell | 15 | 30.61% | 1 | 50.00% |
Total | 49 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules;
/* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
static void module_assert_mutex(void)
{
lockdep_assert_held(&module_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 8 | 57.14% | 1 | 50.00% |
Peter Zijlstra | 6 | 42.86% | 1 | 50.00% |
Total | 14 | 100.00% | 2 | 100.00% |
static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
if (unlikely(!debug_locks))
return;
WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
!lockdep_is_held(&module_mutex));
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 28 | 77.78% | 1 | 33.33% |
Rusty Russell | 7 | 19.44% | 1 | 33.33% |
Steven Rostedt | 1 | 2.78% | 1 | 33.33% |
Total | 36 | 100.00% | 3 | 100.00% |
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
/*
* Export the sig_enforce kernel cmdline parameter to allow other subsystems
* to rely on it instead of directly on the CONFIG_MODULE_SIG_FORCE config.
*/
bool is_module_sig_enforced(void)
{
return sig_enforce;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bruno E. O. Meneguele | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(is_module_sig_enforced);
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);
static BLOCKING_NOTIFIER_HEAD(module_notify_list);
int register_module_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&module_notify_list, nb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Levon | 17 | 89.47% | 1 | 50.00% |
Alan Stern | 2 | 10.53% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(register_module_notifier);
int unregister_module_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Levon | 17 | 89.47% | 1 | 50.00% |
Alan Stern | 2 | 10.53% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(unregister_module_notifier);
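A minimal sketch of a notifier client (hypothetical callback and variable names; the chain fires on module state transitions such as MODULE_STATE_COMING, LIVE and GOING):
static int example_module_cb(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct module *mod = data;

	pr_info("module %s changed state (event %lu)\n", mod->name, action);
	return NOTIFY_OK;
}

static struct notifier_block example_module_nb = {
	.notifier_call = example_module_cb,
};

/* In the client's init/exit paths:
 *	register_module_notifier(&example_module_nb);
 *	unregister_module_notifier(&example_module_nb);
 */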
struct load_info {
const char *name;
Elf_Ehdr *hdr;
unsigned long len;
Elf_Shdr *sechdrs;
char *secstrings, *strtab;
unsigned long symoffs, stroffs;
struct _ddebug *debug;
unsigned int num_debug;
bool sig_ok;
#ifdef CONFIG_KALLSYMS
unsigned long mod_kallsyms_init_off;
#endif
struct {
unsigned int sym, str, mod, vers, info, pcpu;
} index;
};
/*
* We require a truly strong try_module_get(): 0 means success.
* Otherwise an error is returned due to ongoing or failed
* initialization etc.
*/
static inline int strong_try_module_get(struct module *mod)
{
BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
if (mod && mod->state == MODULE_STATE_COMING)
return -EBUSY;
if (try_module_get(mod))
return 0;
else
return -ENOENT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 52 | 100.00% | 3 | 100.00% |
Total | 52 | 100.00% | 3 | 100.00% |
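For context, the main caller is the dependency-tracking path (ref_module(), later in this file). A condensed sketch, abridged and with cleanup on failure omitted; already_uses() and add_module_usage() are the helpers defined further down under CONFIG_MODULE_UNLOAD:
static int ref_module_sketch(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	err = strong_try_module_get(b);	/* 0 on success, -EBUSY/-ENOENT otherwise */
	if (err)
		return err;

	return add_module_usage(a, b);	/* record the dependency edge a -> b */
}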
static inline void add_taint_module(struct module *mod, unsigned flag,
enum lockdep_ok lockdep_ok)
{
add_taint(flag, lockdep_ok);
set_bit(flag, &mod->taints);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florin Malita | 24 | 66.67% | 1 | 33.33% |
Petr Mladek | 6 | 16.67% | 1 | 33.33% |
Rusty Russell | 6 | 16.67% | 1 | 33.33% |
Total | 36 | 100.00% | 3 | 100.00% |
/*
* A thread that wants to hold a reference to a module only while it
* is running can call this to safely exit. nfsd and lockd use this.
*/
void __noreturn __module_put_and_exit(struct module *mod, long code)
{
module_put(mod);
do_exit(code);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 23 | 95.83% | 1 | 50.00% |
Jiri Kosina | 1 | 4.17% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(__module_put_and_exit);
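Callers normally reach this through the module_put_and_exit() wrapper in <linux/module.h>, which supplies THIS_MODULE. A typical kthread exit path (illustrative function name):
static int example_kthread(void *unused)
{
	/* ... service loop owned by the module ... */
	module_put_and_exit(0);	/* expands to __module_put_and_exit(THIS_MODULE, 0) */
	/* not reached */
}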
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
unsigned int i;
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i];
/* Alloc bit cleared means "ignore it." */
if ((shdr->sh_flags & SHF_ALLOC)
&& strcmp(info->secstrings + shdr->sh_name, name) == 0)
return i;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 85 | 100.00% | 2 | 100.00% |
Total | 85 | 100.00% | 2 | 100.00% |
/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
/* Section 0 has sh_addr 0. */
return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 38 | 100.00% | 2 | 100.00% |
Total | 38 | 100.00% | 2 | 100.00% |
/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
const char *name,
size_t object_size,
unsigned int *num)
{
unsigned int sec = find_sec(info, name);
/* Section 0 has sh_addr 0 and sh_size 0. */
*num = info->sechdrs[sec].sh_size / object_size;
return (void *)info->sechdrs[sec].sh_addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 66 | 100.00% | 2 | 100.00% |
Total | 66 | 100.00% | 2 | 100.00% |
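find_module_sections(), later in this file, leans on this helper heavily; one representative call, shown here only for context:
/* Pull the module's export table and its entry count out of "__ksymtab". */
mod->syms = section_objs(info, "__ksymtab", sizeof(*mod->syms),
			 &mod->num_syms);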
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
extern const s32 __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const s32 __start___kcrctab_unused[];
extern const s32 __start___kcrctab_unused_gpl[];
#endif
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
static bool each_symbol_in_section(const struct symsearch *arr,
unsigned int arrsize,
struct module *owner,
bool (*fn)(const struct symsearch *syms,
struct module *owner,
void *data),
void *data)
{
unsigned int j;
for (j = 0; j < arrsize; j++) {
if (fn(&arr[j], owner, data))
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 60 | 68.97% | 1 | 50.00% |
Sam Ravnborg | 27 | 31.03% | 1 | 50.00% |
Total | 87 | 100.00% | 2 | 100.00% |
/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
struct module *owner,
void *data),
void *data)
{
struct module *mod;
static const struct symsearch arr[] = {
{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
NOT_GPL_ONLY, false },
{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
__start___kcrctab_gpl,
GPL_ONLY, false },
{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
__start___kcrctab_gpl_future,
WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
{ __start___ksymtab_unused, __stop___ksymtab_unused,
__start___kcrctab_unused,
NOT_GPL_ONLY, true },
{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
__start___kcrctab_unused_gpl,
GPL_ONLY, true },
#endif
};
module_assert_mutex_or_preempt();
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
return true;
list_for_each_entry_rcu(mod, &modules, list) {
struct symsearch arr[] = {
{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
NOT_GPL_ONLY, false },
{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
mod->gpl_crcs,
GPL_ONLY, false },
{ mod->gpl_future_syms,
mod->gpl_future_syms + mod->num_gpl_future_syms,
mod->gpl_future_crcs,
WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
{ mod->unused_syms,
mod->unused_syms + mod->num_unused_syms,
mod->unused_crcs,
NOT_GPL_ONLY, true },
{ mod->unused_gpl_syms,
mod->unused_gpl_syms + mod->num_unused_gpl_syms,
mod->unused_gpl_crcs,
GPL_ONLY, true },
#endif
};
if (mod->state == MODULE_STATE_UNFORMED)
continue;
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 248 | 82.39% | 7 | 43.75% |
Sam Ravnborg | 18 | 5.98% | 1 | 6.25% |
Denys Vlasenko | 14 | 4.65% | 1 | 6.25% |
Arjan van de Ven | 8 | 2.66% | 1 | 6.25% |
Linus Torvalds (pre-git) | 5 | 1.66% | 1 | 6.25% |
Peter Zijlstra | 3 | 1.00% | 1 | 6.25% |
Greg Kroah-Hartman | 2 | 0.66% | 1 | 6.25% |
Linus Torvalds | 1 | 0.33% | 1 | 6.25% |
Andi Kleen | 1 | 0.33% | 1 | 6.25% |
David Mosberger-Tang | 1 | 0.33% | 1 | 6.25% |
Total | 301 | 100.00% | 16 | 100.00% |
EXPORT_SYMBOL_GPL(each_symbol_section);
struct find_symbol_arg {
/* Input */
const char *name;
bool gplok;
bool warn;
/* Output */
struct module *owner;
const s32 *crc;
const struct kernel_symbol *sym;
};
static bool check_symbol(const struct symsearch *syms,
struct module *owner,
unsigned int symnum, void *data)
{
struct find_symbol_arg *fsa = data;
if (!fsa->gplok) {
if (syms->licence == GPL_ONLY)
return false;
if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
pr_warn("Symbol %s is being used by a non-GPL module, "
"which will not be allowed in the future\n",
fsa->name);
}
}
#ifdef CONFIG_UNUSED_SYMBOLS
if (syms->unused && fsa->warn) {
pr_warn("Symbol %s is marked as UNUSED, however this module is "
"using it.\n", fsa->name);
pr_warn("This symbol will go away in the future.\n");
pr_warn("Please evaluate if this is the right api to use and "
"if it really is, submit a report to the linux kernel "
"mailing list together with submitting your code for "
"inclusion.\n");
}
#endif
fsa->owner = owner;
fsa->crc = symversion(syms->crcs, symnum);
fsa->sym = &syms->start[symnum];
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 123 | 82.00% | 2 | 28.57% |
Arjan van de Ven | 9 | 6.00% | 1 | 14.29% |
Andrew Morton | 8 | 5.33% | 1 | 14.29% |
Denys Vlasenko | 5 | 3.33% | 1 | 14.29% |
Yannick Guerrini | 3 | 2.00% | 1 | 14.29% |
Tim Abbott | 2 | 1.33% | 1 | 14.29% |
Total | 150 | 100.00% | 7 | 100.00% |
static int cmp_name(const void *va, const void *vb)
{
const char *a;
const struct kernel_symbol *b;
a = va; b = vb;
return strcmp(a, b->name);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alessio Igor Bogani | 45 | 100.00% | 1 | 100.00% |
Total | 45 | 100.00% | 1 | 100.00% |
static bool find_symbol_in_section(const struct symsearch *syms,
struct module *owner,
void *data)
{
struct find_symbol_arg *fsa = data;
struct kernel_symbol *sym;
sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
sizeof(struct kernel_symbol), cmp_name);
if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 53 | 59.55% | 1 | 50.00% |
Alessio Igor Bogani | 36 | 40.45% | 1 | 50.00% |
Total | 89 | 100.00% | 2 | 100.00% |
/* Find a symbol and return it, along with the (optional) crc and the
 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const s32 **crc,
bool gplok,
bool warn)
{
struct find_symbol_arg fsa;
fsa.name = name;
fsa.gplok = gplok;
fsa.warn = warn;
if (each_symbol_section(find_symbol_in_section, &fsa)) {
if (owner)
*owner = fsa.owner;
if (crc)
*crc = fsa.crc;
return fsa.sym;
}
pr_debug("Failed to find symbol %s\n", name);
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 73 | 71.57% | 4 | 40.00% |
Greg Kroah-Hartman | 14 | 13.73% | 1 | 10.00% |
Tim Abbott | 6 | 5.88% | 1 | 10.00% |
Linus Torvalds (pre-git) | 5 | 4.90% | 1 | 10.00% |
Arjan van de Ven | 2 | 1.96% | 1 | 10.00% |
Ard Biesheuvel | 1 | 0.98% | 1 | 10.00% |
Jim Cromie | 1 | 0.98% | 1 | 10.00% |
Total | 102 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL_GPL(find_symbol);
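A caller sketch honouring the locking rule stated above; __symbol_get(), later in this file, follows the same pattern (symbol name here is only an example):
const struct kernel_symbol *sym;
struct module *owner;
const s32 *crc;

preempt_disable();				/* or hold module_mutex */
sym = find_symbol("printk", &owner, &crc, true, true);
if (sym && strong_try_module_get(owner))
	sym = NULL;				/* owner is going away; don't use it */
preempt_enable();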
/*
* Search for module by name: must hold module_mutex (or preempt disabled
* for read-only access).
*/
static struct module *find_module_all(const char *name, size_t len,
bool even_unformed)
{
struct module *mod;
module_assert_mutex_or_preempt();
list_for_each_entry_rcu(mod, &modules, list) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
continue;
if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
return mod;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 38 | 48.72% | 3 | 42.86% |
Linus Torvalds (pre-git) | 21 | 26.92% | 1 | 14.29% |
Mathias Krause | 16 | 20.51% | 1 | 14.29% |
Peter Zijlstra | 2 | 2.56% | 1 | 14.29% |
Luis R. Rodriguez | 1 | 1.28% | 1 | 14.29% |
Total | 78 | 100.00% | 7 | 100.00% |
struct module *find_module(const char *name)
{
module_assert_mutex();
return find_module_all(name, strlen(name), false);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 23 | 82.14% | 2 | 66.67% |
Mathias Krause | 5 | 17.86% | 1 | 33.33% |
Total | 28 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(find_module);
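Since find_module() asserts module_mutex, a caller looks like this (illustrative module name):
struct module *mod;

mutex_lock(&module_mutex);
mod = find_module("ext4");		/* NULL if not loaded (unformed modules are skipped) */
if (mod)
	pr_info("%s is loaded, state %d\n", mod->name, mod->state);
mutex_unlock(&module_mutex);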
#ifdef CONFIG_SMP
static inline void __percpu *mod_percpu(struct module *mod)
{
return mod->percpu;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 19 | 100.00% | 2 | 100.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
unsigned long align = pcpusec->sh_addralign;
if (!pcpusec->sh_size)
return 0;
if (align > PAGE_SIZE) {
pr_warn("%s: per-cpu alignment %li > %li\n",
mod->name, align, PAGE_SIZE);
align = PAGE_SIZE;
}
mod->percpu =