cregit-Linux: how code gets into the kernel

Release 4.10: kernel/kmod.c
Directory: kernel
/*
        kmod, the new module loader (replaces kerneld)
        Kirk Petersen

        Reorganized not to be a daemon by Adam Richter, with guidance
        from Greg Zornetzer.

        Modified to avoid chroot and file sharing problems.
        Mikael Pettersson

        Limit the concurrent number of kmod modprobes to catch loops from
        "modprobe needs a service that is in a module".
        Keith Owens <kaos@ocs.com.au> December 1999

        Unblock all signals when we exec a usermode process.
        Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

        call_usermodehelper wait flag, and remove exec_usermodehelper.
        Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;


#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
        modprobe_path is set via /proc/sys.
*/

char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
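
modprobe_path is exposed to userspace as /proc/sys/kernel/modprobe (the kernel.modprobe sysctl). As an illustration only, here is a minimal userspace sketch, assuming nothing beyond the sysctl file itself, that reads the current helper path and, with sufficient privilege, rewrites it. Note that writing an empty string makes __request_module() below return early, which disables module auto-loading.

/* Hypothetical userspace sketch: inspect and update kernel.modprobe. */
#include <stdio.h>

int main(void)
{
        char path[256] = "";
        FILE *f = fopen("/proc/sys/kernel/modprobe", "r");

        if (!f)
                return 1;
        if (fgets(path, sizeof(path), f))
                printf("current helper: %s", path);
        fclose(f);

        /* Rewriting requires root; an empty string would disable
         * module auto-loading (see __request_module() below). */
        f = fopen("/proc/sys/kernel/modprobe", "w");
        if (!f)
                return 1;
        fputs("/sbin/modprobe\n", f);
        fclose(f);
        return 0;
}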


static void free_modprobe_argv(struct subprocess_info *info)
{
        kfree(info->argv[3]); /* check call_modprobe() */
        kfree(info->argv);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
oleg nesterov          29      100.00%  1        100.00%
Total                  29      100.00%  1        100.00%


static int call_modprobe(char *module_name, int wait)
{
        struct subprocess_info *info;
        static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

        char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
        if (!argv)
                goto out;

        module_name = kstrdup(module_name, GFP_KERNEL);
        if (!module_name)
                goto free_argv;

        argv[0] = modprobe_path;
        argv[1] = "-q";
        argv[2] = "--";
        argv[3] = module_name;  /* check free_modprobe_argv() */
        argv[4] = NULL;

        info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
                                         NULL, free_modprobe_argv, NULL);
        if (!info)
                goto free_module_name;

        return call_usermodehelper_exec(info, wait | UMH_KILLABLE);

free_module_name:
        kfree(module_name);
free_argv:
        kfree(argv);
out:
        return -ENOMEM;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
oleg nesterov          137     80.12%   2        66.67%
lucas de marchi        34      19.88%   1        33.33%
Total                  171     100.00%  3        100.00%

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code or positive exit code from
 * "modprobe" on failure. Note that a successful module load does not mean
 * the module did not then unload and exit on an error of its own. Callers
 * must check that the service they requested is now available, not blindly
 * invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
        va_list args;
        char module_name[MODULE_NAME_LEN];
        unsigned int max_modprobes;
        int ret;
        static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50  /* Completely arbitrary value - KAO */
        static int kmod_loop_msg;

        /*
         * We don't allow synchronous module loading from async. Module
         * init may invoke async_synchronize_full() which will end up
         * waiting for this task which already is waiting for the module
         * loading to complete, leading to a deadlock.
         */
        WARN_ON_ONCE(wait && current_is_async());

        if (!modprobe_path[0])
                return 0;

        va_start(args, fmt);
        ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
        va_end(args);
        if (ret >= MODULE_NAME_LEN)
                return -ENAMETOOLONG;

        ret = security_kernel_module_request(module_name);
        if (ret)
                return ret;

        /* If modprobe needs a service that is in a module, we get a recursive
         * loop.  Limit the number of running kmod threads to max_threads/2 or
         * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
         * would be to run the parents of this process, counting how many times
         * kmod was invoked.  That would mean accessing the internals of the
         * process tables to get the command line, proc_pid_cmdline is static
         * and it is not worth changing the proc code just to handle this case.
         * KAO.
         *
         * "trace the ppid" is simple, but will fail if someone's
         * parent exits.  I think this is as good as it gets. --RR
         */
        max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
        atomic_inc(&kmod_concurrent);
        if (atomic_read(&kmod_concurrent) > max_modprobes) {
                /* We may be blaming an innocent here, but unlikely */
                if (kmod_loop_msg < 5) {
                        printk(KERN_ERR
                               "request_module: runaway loop modprobe %s\n",
                               module_name);
                        kmod_loop_msg++;
                }
                atomic_dec(&kmod_concurrent);
                return -ENOMEM;
        }

        trace_module_request(module_name, wait, _RET_IP_);

        ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

        atomic_dec(&kmod_concurrent);
        return ret;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rusty russell          59      28.23%   2        12.50%
linus torvalds         45      21.53%   1        6.25%
pre-git                43      20.57%   5        31.25%
eric paris             14      6.70%    1        6.25%
oleg nesterov          13      6.22%    2        12.50%
arjan van de ven       9       4.31%    1        6.25%
tejun heo              9       4.31%    1        6.25%
li zefan               9       4.31%    1        6.25%
jiri kosina            5       2.39%    1        6.25%
ingo molnar            3       1.44%    1        6.25%
Total                  209     100.00%  16       100.00%

EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */
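
In this release, <linux/kmod.h> wraps the entry point as request_module() (wait == true) and request_module_nowait() (wait == false). A hedged sketch of a typical caller follows; example_attach() and example_lookup_proto() are hypothetical names standing in for a real driver and the registry its module is expected to populate.

/* Hypothetical driver snippet built on request_module(). */
#include <linux/kmod.h>

extern void *example_lookup_proto(int unit);    /* hypothetical registry lookup */

static int example_attach(int unit)
{
        int ret;

        ret = request_module("example-proto-%d", unit); /* may sleep */
        if (ret)
                return ret < 0 ? ret : -ENOENT; /* positive = modprobe exit code */

        /*
         * Per the kerneldoc above, a zero return only means modprobe ran;
         * the caller must still check that the requested service actually
         * appeared rather than blindly invoke it.
         */
        return example_lookup_proto(unit) ? 0 : -ENODEV;
}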
static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
        if (info->cleanup)
                (*info->cleanup)(info);
        kfree(info);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
martin schwidefsky     32      100.00%  1        100.00%
Total                  32      100.00%  1        100.00%


static void umh_complete(struct subprocess_info *sub_info)
{
        struct completion *comp = xchg(&sub_info->complete, NULL);
        /*
         * See call_usermodehelper_exec(). If xchg() returns NULL
         * we own sub_info, the UMH_KILLABLE caller has gone away
         * or the caller used UMH_NO_WAIT.
         */
        if (comp)
                complete(comp);
        else
                call_usermodehelper_freeinfo(sub_info);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
martin schwidefsky     42      100.00%  1        100.00%
Total                  42      100.00%  1        100.00%

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
        struct subprocess_info *sub_info = data;
        struct cred *new;
        int retval;

        spin_lock_irq(&current->sighand->siglock);
        flush_signal_handlers(current, 1);
        spin_unlock_irq(&current->sighand->siglock);

        /*
         * Our parent (unbound workqueue) runs with elevated scheduling
         * priority. Avoid propagating that into the userspace child.
         */
        set_user_nice(current, 0);

        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
                goto out;

        spin_lock(&umh_sysctl_lock);
        new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
        new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
                                             new->cap_inheritable);
        spin_unlock(&umh_sysctl_lock);

        if (sub_info->init) {
                retval = sub_info->init(sub_info, new);
                if (retval) {
                        abort_creds(new);
                        goto out;
                }
        }

        commit_creds(new);

        retval = do_execve(getname_kernel(sub_info->path),
                           (const char __user *const __user *)sub_info->argv,
                           (const char __user *const __user *)sub_info->envp);
out:
        sub_info->retval = retval;
        /*
         * call_usermodehelper_exec_sync() will call umh_complete
         * if UMH_WAIT_PROC.
         */
        if (!(sub_info->wait & UMH_WAIT_PROC))
                umh_complete(sub_info);
        if (!retval)
                return 0;
        do_exit(0);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
eric paris             67      28.88%   1        5.88%
david howells          46      19.83%   2        11.76%
pre-git                36      15.52%   3        17.65%
rusty russell          25      10.78%   1        5.88%
martin schwidefsky     20      8.62%    1        5.88%
al viro                17      7.33%    3        17.65%
jan engelhardt         7       3.02%    1        5.88%
oleg nesterov          6       2.59%    1        5.88%
linus torvalds         5       2.16%    2        11.76%
frederic weisbecker    3       1.29%    2        11.76%
Total                  232     100.00%  17       100.00%

/* Handles UMH_WAIT_PROC. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
        pid_t pid;

        /* If SIGCLD is ignored sys_wait4 won't populate the status. */
        kernel_sigaction(SIGCHLD, SIG_DFL);
        pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
        if (pid < 0) {
                sub_info->retval = pid;
        } else {
                int ret = -ECHILD;
                /*
                 * Normally it is bogus to call wait4() from in-kernel because
                 * wait4() wants to write the exit code to a userspace address.
                 * But call_usermodehelper_exec_sync() always runs as kernel
                 * thread (workqueue) and put_user() to a kernel address works
                 * OK for kernel threads, due to their having an mm_segment_t
                 * which spans the entire address space.
                 *
                 * Thus the __user pointer cast is valid here.
                 */
                sys_wait4(pid, (int __user *)&ret, 0, NULL);

                /*
                 * If ret is 0, either call_usermodehelper_exec_async failed and
                 * the real error code is already in sub_info->retval or
                 * sub_info->retval is 0 anyway, so don't mess with it then.
                 */
                if (ret)
                        sub_info->retval = ret;
        }

        /* Restore default kernel sig handler */
        kernel_sigaction(SIGCHLD, SIG_IGN);

        umh_complete(sub_info);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rusty russell          46      46.94%   1        11.11%
frederic weisbecker    14      14.29%   2        22.22%
bjorn steinbrink       14      14.29%   1        11.11%
linus torvalds         9       9.18%    1        11.11%
oleg nesterov          7       7.14%    2        22.22%
andrew morton          5       5.10%    1        11.11%
al viro                3       3.06%    1        11.11%
Total                  98      100.00%  9        100.00%

/*
 * We need to create the usermodehelper kernel thread from a task that is affine
 * to an optimized set of CPUs (or nohz housekeeping ones) such that they
 * inherit a widest affinity irrespective of call_usermodehelper() callers with
 * possibly reduced affinity (eg: per-cpu workqueues). We don't want
 * usermodehelper targets to contend a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow to block on
 * UMH_WAIT_PROC requests without blocking pending request (up to some limit).
 *
 * Besides, workqueues provide the privilege level that caller might not have
 * to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
        struct subprocess_info *sub_info =
                container_of(work, struct subprocess_info, work);

        if (sub_info->wait & UMH_WAIT_PROC) {
                call_usermodehelper_exec_sync(sub_info);
        } else {
                pid_t pid;
                /*
                 * Use CLONE_PARENT to reparent it to kthreadd; we do not
                 * want to pollute current->children, and we need a parent
                 * that always ignores SIGCHLD to ensure auto-reaping.
                 */
                pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
                                    CLONE_PARENT | SIGCHLD);
                if (pid < 0) {
                        sub_info->retval = pid;
                        umh_complete(sub_info);
                }
        }
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
pre-git                33      41.25%   2        16.67%
oleg nesterov          13      16.25%   4        33.33%
david howells          12      15.00%   1        8.33%
frederic weisbecker    11      13.75%   2        16.67%
rusty russell          9       11.25%   1        8.33%
jeremy fitzhardinge    1       1.25%    1        8.33%
martin schwidefsky     1       1.25%    1        8.33%
Total                  80      100.00%  12       100.00%

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
int usermodehelper_read_trylock(void)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        down_read(&umhelper_sem);
        for (;;) {
                prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                                TASK_INTERRUPTIBLE);
                if (!usermodehelper_disabled)
                        break;

                if (usermodehelper_disabled == UMH_DISABLED)
                        ret = -EAGAIN;

                up_read(&umhelper_sem);

                if (ret)
                        break;

                schedule();
                try_to_freeze();

                down_read(&umhelper_sem);
        }
        finish_wait(&usermodehelper_disabled_waitq, &wait);
        return ret;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rafael j. wysocki      82      88.17%   2        66.67%
srivatsa s. bhat       11      11.83%   1        33.33%
Total                  93      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
long usermodehelper_read_lock_wait(long timeout)
{
        DEFINE_WAIT(wait);

        if (timeout < 0)
                return -EINVAL;

        down_read(&umhelper_sem);
        for (;;) {
                prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!usermodehelper_disabled)
                        break;

                up_read(&umhelper_sem);

                timeout = schedule_timeout(timeout);
                if (!timeout)
                        break;

                down_read(&umhelper_sem);
        }
        finish_wait(&usermodehelper_disabled_waitq, &wait);
        return timeout;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rafael j. wysocki      90      100.00%  1        100.00%
Total                  90      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
void usermodehelper_read_unlock(void)
{
        up_read(&umhelper_sem);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
srivatsa s. bhat       12      92.31%   1        50.00%
rafael j. wysocki      1       7.69%    1        50.00%
Total                  13      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
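
Paired with usermodehelper_read_trylock() above, the unlock forms a read-side bracket that keeps helpers enabled across a longer section (the in-tree firmware loader uses this to avoid racing with suspend). A minimal sketch of the pattern, with the actual helper round-trip left hypothetical:

/* Hypothetical sketch of the read-lock bracket around helper work. */
static int example_with_helpers(void)
{
        int ret;

        ret = usermodehelper_read_trylock();
        if (ret)        /* -EAGAIN: UMH_DISABLED, e.g. system is suspending */
                return ret;

        ret = example_launch_and_wait();        /* hypothetical helper round-trip */

        usermodehelper_read_unlock();
        return ret;
}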
/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
        down_write(&umhelper_sem);
        usermodehelper_disabled = depth;
        wake_up(&usermodehelper_disabled_waitq);
        up_write(&umhelper_sem);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rafael j. wysocki      31      100.00%  2        100.00%
Total                  31      100.00%  2        100.00%

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
        long retval;

        if (!depth)
                return -EINVAL;

        down_write(&umhelper_sem);
        usermodehelper_disabled = depth;
        up_write(&umhelper_sem);

        /*
         * From now on call_usermodehelper_exec() won't start any new
         * helpers, so it is sufficient if running_helpers turns out to
         * be zero at one point (it may be increased later, but that
         * doesn't matter).
         */
        retval = wait_event_timeout(running_helpers_waitq,
                                    atomic_read(&running_helpers) == 0,
                                    RUNNING_HELPERS_TIMEOUT);
        if (retval)
                return 0;

        __usermodehelper_set_disable_depth(UMH_ENABLED);
        return -EAGAIN;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rafael j. wysocki      59      83.10%   4        80.00%
srivatsa s. bhat       12      16.90%   1        20.00%
Total                  71      100.00%  5        100.00%


static void helper_lock(void)
{
        atomic_inc(&running_helpers);
        smp_mb__after_atomic();
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rafael j. wysocki      16      94.12%   1        50.00%
peter zijlstra         1       5.88%    1        50.00%
Total                  17      100.00%  2        100.00%


static void helper_unlock(void)
{
        if (atomic_dec_and_test(&running_helpers))
                wake_up(&running_helpers_waitq);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
rafael j. wysocki      22      100.00%  1        100.00%
Total                  22      100.00%  1        100.00%

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed. This can be used for freeing the argv and envp. The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
                char **envp, gfp_t gfp_mask,
                int (*init)(struct subprocess_info *info, struct cred *new),
                void (*cleanup)(struct subprocess_info *info),
                void *data)
{
        struct subprocess_info *sub_info;

        sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
        if (!sub_info)
                goto out;

        INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
        sub_info->path = path;
        sub_info->argv = argv;
        sub_info->envp = envp;

        sub_info->cleanup = cleanup;
        sub_info->init = init;
        sub_info->data = data;
out:
        return sub_info;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
lucas de marchi        40      29.85%   1        11.11%
andi kleen             32      23.88%   1        11.11%
pre-git                25      18.66%   1        11.11%
jeremy fitzhardinge    13      9.70%    1        11.11%
neil horman            10      7.46%    1        11.11%
david howells          6       4.48%    1        11.11%
kosaki motohiro        4       2.99%    1        11.11%
art haas               3       2.24%    1        11.11%
frederic weisbecker    1       0.75%    1        11.11%
Total                  134     100.00%  9        100.00%

EXPORT_SYMBOL(call_usermodehelper_setup);
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application. The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues.
 * (ie. it runs with full root capabilities and optimized affinity).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int retval = 0;

        if (!sub_info->path) {
                call_usermodehelper_freeinfo(sub_info);
                return -EINVAL;
        }
        helper_lock();
        if (usermodehelper_disabled) {
                retval = -EBUSY;
                goto out;
        }

        /*
         * Set the completion pointer only if there is a waiter.
         * This makes it possible to use umh_complete to free
         * the data structure in case of UMH_NO_WAIT.
         */
        sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
        sub_info->wait = wait;

        queue_work(system_unbound_wq, &sub_info->work);
        if (wait == UMH_NO_WAIT)        /* task has freed sub_info */
                goto unlock;

        if (wait & UMH_KILLABLE) {
                retval = wait_for_completion_killable(&done);
                if (!retval)
                        goto wait_done;

                /* umh_complete() will see NULL and free sub_info */
                if (xchg(&sub_info->complete, NULL))
                        goto unlock;
                /* fallthrough, umh_complete() was already called */
        }

        wait_for_completion(&done);
wait_done:
        retval = sub_info->retval;
out:
        call_usermodehelper_freeinfo(sub_info);
unlock:
        helper_unlock();
        return retval;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
oleg nesterov          45      26.47%   3        15.79%
jeremy fitzhardinge    43      25.29%   2        10.53%
andi kleen             19      11.18%   1        5.26%
tetsuo handa           18      10.59%   1        5.26%
pre-git                12      7.06%    2        10.53%
martin schwidefsky     9       5.29%    1        5.26%
rafael j. wysocki      7       4.12%    2        10.53%
nigel cunningham       7       4.12%    1        5.26%
rusty russell          3       1.76%    1        5.26%
ingo molnar            2       1.18%    1        5.26%
andrew morton          2       1.18%    1        5.26%
linus torvalds         1       0.59%    1        5.26%
david howells          1       0.59%    1        5.26%
frederic weisbecker    1       0.59%    1        5.26%
Total                  170     100.00%  19       100.00%

EXPORT_SYMBOL(call_usermodehelper_exec);
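
The two-step setup/exec API mirrors what call_modprobe() does above: build argv, hand ownership to a cleanup callback, then fire the helper. Below is a hedged sketch under those assumptions; the binary path and function names are invented, and <linux/kmod.h> plus <linux/slab.h> are assumed.

/* Hypothetical two-step usage: the cleanup callback owns argv, so the
 * caller can use UMH_NO_WAIT and forget about the allocation. */
static void example_free_argv(struct subprocess_info *info)
{
        kfree(info->argv[1]);   /* the kstrdup()ed argument below */
        kfree(info->argv);
}

static int example_spawn(const char *arg)
{
        static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
        struct subprocess_info *info;
        char **argv;

        argv = kmalloc(sizeof(char *[3]), GFP_KERNEL);
        if (!argv)
                return -ENOMEM;

        argv[0] = "/sbin/example-helper";       /* hypothetical binary */
        argv[1] = kstrdup(arg, GFP_KERNEL);     /* freed in example_free_argv() */
        argv[2] = NULL;
        if (!argv[1]) {
                kfree(argv);
                return -ENOMEM;
        }

        info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
                                         NULL, example_free_argv, NULL);
        if (!info) {
                kfree(argv[1]);
                kfree(argv);
                return -ENOMEM;
        }

        /* UMH_NO_WAIT: returns at once; umh_complete() then frees info,
         * which triggers example_free_argv(). */
        return call_usermodehelper_exec(info, UMH_NO_WAIT);
}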
/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup()
 * followed by call_usermodehelper_exec().
 */
int call_usermodehelper(char *path, char **argv, char **envp, int wait)
{
        struct subprocess_info *info;
        gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

        info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
                                         NULL, NULL, NULL);
        if (info == NULL)
                return -ENOMEM;

        return call_usermodehelper_exec(info, wait);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
boaz harrosh           70      90.91%   1        33.33%
lucas de marchi        7       9.09%    2        66.67%
Total                  77      100.00%  3        100.00%

EXPORT_SYMBOL(call_usermodehelper);
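
For the common case, the one-shot wrapper is enough. A minimal sketch; the helper binary path is illustrative only.

/* Hypothetical one-shot invocation: run a program and wait for its exit. */
static int example_notify_userspace(void)
{
        static char *argv[] = { "/sbin/example-notify", "--ping", NULL };
        static char *envp[] = {
                "HOME=/",
                "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                NULL
        };

        /* UMH_WAIT_PROC: block until the process exits; a negative return
         * is an errno, a positive one is the helper's exit status. */
        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}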
static int proc_cap_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
        kernel_cap_t new_cap;
        int err, i;

        if (write && (!capable(CAP_SETPCAP) ||
                      !capable(CAP_SYS_MODULE)))
                return -EPERM;

        /*
         * convert from the global kernel_cap_t to the ulong array to print to
         * userspace if this is a read.
         */
        spin_lock(&umh_sysctl_lock);
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
                if (table->data == CAP_BSET)
                        cap_array[i] = usermodehelper_bset.cap[i];
                else if (table->data == CAP_PI)
                        cap_array[i] = usermodehelper_inheritable.cap[i];
                else
                        BUG();
        }
        spin_unlock(&umh_sysctl_lock);

        t = *table;
        t.data = &cap_array;

        /*
         * actually read or write an array of ulongs from userspace.  Remember
         * these are least significant 32 bits first
         */
        err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
        if (err < 0)
                return err;

        /*
         * convert from the sysctl array of ulongs to the kernel_cap_t
         * internal representation
         */
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
                new_cap.cap[i] = cap_array[i];

        /*
         * Drop everything not in the new_cap (but don't add things)
         */
        spin_lock(&umh_sysctl_lock);
        if (write) {
                if (table->data == CAP_BSET)
                        usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
                if (table->data == CAP_PI)
                        usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
        }
        spin_unlock(&umh_sysctl_lock);

        return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
eric paris             261     100.00%  1        100.00%
Total                  261     100.00%  1        100.00%

struct ctl_table usermodehelper_table[] = {
        {
                .procname       = "bset",
                .data           = CAP_BSET,
                .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode           = 0600,
                .proc_handler   = proc_cap_handler,
        },
        {
                .procname       = "inheritable",
                .data           = CAP_PI,
                .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode           = 0600,
                .proc_handler   = proc_cap_handler,
        },
        { }
};
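
These entries surface as /proc/sys/kernel/usermodehelper/bset and /proc/sys/kernel/usermodehelper/inheritable. Because proc_cap_handler() goes through proc_doulongvec_minmax(), each file reads and writes as _KERNEL_CAPABILITY_U32S unsigned longs, least significant 32 bits first, and a write can only drop capabilities. A hypothetical userspace sketch that removes CAP_SYS_BOOT (bit 22) from the helper bounding set; the writer needs both CAP_SETPCAP and CAP_SYS_MODULE.

/* Hypothetical userspace sketch: drop CAP_SYS_BOOT from the usermode
 * helper bounding set.  proc_cap_handler() intersects on write, so
 * capabilities can be removed here but never added back. */
#include <stdio.h>

int main(void)
{
        unsigned long lo, hi;
        FILE *f = fopen("/proc/sys/kernel/usermodehelper/bset", "r");

        if (!f || fscanf(f, "%lu %lu", &lo, &hi) != 2)
                return 1;
        fclose(f);

        lo &= ~(1UL << 22);     /* clear CAP_SYS_BOOT in the low word */

        f = fopen("/proc/sys/kernel/usermodehelper/bset", "w");
        if (!f)
                return 1;
        fprintf(f, "%lu %lu\n", lo, hi);
        fclose(f);
        return 0;
}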

Overall Contributors

Person                 Tokens  Prop     Commits  CommitProp
eric paris             446     20.75%   2        2.30%
rafael j. wysocki      355     16.52%   6        6.90%
oleg nesterov          250     11.63%   12       13.79%
pre-git                184     8.56%    9        10.34%
rusty russell          147     6.84%    4        4.60%
martin schwidefsky     104     4.84%    1        1.15%
lucas de marchi        94      4.37%    3        3.45%
linus torvalds         71      3.30%    9        10.34%
boaz harrosh           70      3.26%    1        1.15%
david howells          65      3.02%    3        3.45%
jeremy fitzhardinge    57      2.65%    2        2.30%
srivatsa s. bhat       55      2.56%    2        2.30%
andi kleen             54      2.51%    2        2.30%
frederic weisbecker    33      1.54%    3        3.45%
al viro                26      1.21%    4        4.60%
tetsuo handa           18      0.84%    1        1.15%
andrew morton          18      0.84%    4        4.60%
bjorn steinbrink       14      0.65%    1        1.15%
li zefan               12      0.56%    1        1.15%
tejun heo              12      0.56%    1        1.15%
arjan van de ven       10      0.47%    1        1.15%
neil horman            10      0.47%    1        1.15%
nigel cunningham       7       0.33%    1        1.15%
jan engelhardt         7       0.33%    1        1.15%
ingo molnar            6       0.28%    2        2.30%
jiri kosina            5       0.23%    1        1.15%
kosaki motohiro        4       0.19%    1        1.15%
art haas               3       0.14%    1        1.15%
christoph hellwig      3       0.14%    1        1.15%
greg kroah-hartman     3       0.14%    1        1.15%
matthew wilcox         2       0.09%    1        1.15%
randy dunlap           1       0.05%    1        1.15%
neil brown             1       0.05%    1        1.15%
peter zijlstra         1       0.05%    1        1.15%
johannes berg          1       0.05%    1        1.15%
Total                  2149    100.00%  87       100.00%