cregit-Linux: how code gets into the kernel

Release 4.15, kernel/signal.c (directory: kernel)
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>


#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Pavel Emelyanov                   14  43.75%        1      16.67%
Roland McGrath                     7  21.88%        1      16.67%
Linus Torvalds                     7  21.88%        1      16.67%
Ingo Molnar                        2   6.25%        2      33.33%
Linus Torvalds (pre-git)           2   6.25%        1      16.67%
Total                             32 100.00%        6     100.00%


static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Pavel Emelyanov                   17  54.84%        1      50.00%
Roland McGrath                    14  45.16%        1      50.00%
Total                             31 100.00%        2     100.00%
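
sig_kernel_ignore() covers the signals whose default action is to be discarded (SIGCHLD, SIGWINCH, SIGURG, and friends). A minimal userspace sketch of the same rule, assuming only standard POSIX APIs:

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	signal(SIGWINCH, SIG_DFL);	/* default action for SIGWINCH: ignore */
	kill(getpid(), SIGWINCH);	/* generated, then silently discarded */
	printf("still alive after SIGWINCH with SIG_DFL\n");
	return 0;
}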


static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Oleg Nesterov                     49  71.01%        3      50.00%
Pavel Emelyanov                   12  17.39%        1      16.67%
Roland McGrath                     5   7.25%        1      16.67%
Sukadev Bhattiprolu                3   4.35%        1      16.67%
Total                             69 100.00%        6     100.00%


static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) ||
	    sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Oleg Nesterov                     29  43.28%        3      37.50%
Linus Torvalds                    19  28.36%        1      12.50%
Roland McGrath                    17  25.37%        2      25.00%
Sukadev Bhattiprolu                1   1.49%        1      12.50%
Pavel Emelyanov                    1   1.49%        1      12.50%
Total                             67 100.00%        8     100.00%
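
The first check is observable from userspace: a signal that arrives while blocked stays pending, so a handler installed after the fact still receives it on unblock. A small illustrative program (names are ours, not the kernel's):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got;
static void h(int sig) { got = 1; }

int main(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	kill(getpid(), SIGUSR1);	/* stays pending, not delivered */
	signal(SIGUSR1, h);		/* disposition changes while pending */
	sigprocmask(SIG_UNBLOCK, &set, NULL);

	printf("handler ran: %d\n", (int)got);	/* prints 1 */
	return 0;
}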

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
William Lee Irwin III            198 100.00%        1     100.00%
Total                            198 100.00%        1     100.00%
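
The switch above is just a hand-unrolled, word-wise OR of signal & ~blocked. A standalone sketch of the same scan, with NSIG_WORDS and the array layout chosen for illustration rather than taken from the kernel:

#include <stdio.h>

#define NSIG_WORDS 2

static int has_pending(const unsigned long *signal, const unsigned long *blocked)
{
	unsigned long ready = 0;
	int i;

	for (i = 0; i < NSIG_WORDS; i++)
		ready |= signal[i] & ~blocked[i];	/* deliverable bits in this word */
	return ready != 0;
}

int main(void)
{
	unsigned long pending[NSIG_WORDS] = { 1UL << 9, 0 };	/* "signal 10" pending */
	unsigned long blocked[NSIG_WORDS] = { 1UL << 9, 0 };	/* ...but blocked */

	printf("deliverable: %d\n", has_pending(pending, blocked));	/* 0 */
	blocked[0] = 0;
	printf("deliverable: %d\n", has_pending(pending, blocked));	/* 1 */
	return 0;
}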

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Ingo Molnar                       37  56.92%        2      20.00%
Roland McGrath                    12  18.46%        2      20.00%
Linus Torvalds (pre-git)          10  15.38%        2      20.00%
Tejun Heo                          5   7.69%        3      30.00%
Linus Torvalds                     1   1.54%        1      10.00%
Total                             65 100.00%       10     100.00%

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Roland McGrath                    23  95.83%        1      50.00%
Ingo Molnar                        1   4.17%        1      50.00%
Total                             24 100.00%        2     100.00%


void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Ingo Molnar                       11  42.31%        2      40.00%
Roland McGrath                     8  30.77%        1      20.00%
Rafael J. Wysocki                  6  23.08%        1      20.00%
Linus Torvalds (pre-git)           1   3.85%        1      20.00%
Total                             26 100.00%        5     100.00%

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Linus Torvalds (pre-git)          74  40.66%        2      40.00%
Linus Torvalds                    58  31.87%        1      20.00%
Ingo Molnar                       49  26.92%        1      20.00%
Davide Libenzi                     1   0.55%        1      20.00%
Total                            182 100.00%        5     100.00%
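
ffz(~x) + 1 turns the lowest set bit of x into a 1-based signal number, the same result as __builtin_ctzl(x) + 1 in userspace; the first-word special case gives the synchronous fault signals priority over anything else pending. An illustrative sketch, where the reduced two-signal mask stands in for the kernel's SYNCHRONOUS_MASK:

#include <signal.h>
#include <stdio.h>

/* stand-in for SYNCHRONOUS_MASK, first word only */
#define SYNC_MASK ((1UL << (SIGSEGV - 1)) | (1UL << (SIGILL - 1)))

static int lowest_sig(unsigned long pending, unsigned long blocked)
{
	unsigned long x = pending & ~blocked;

	if (!x)
		return 0;
	if (x & SYNC_MASK)		/* synchronous faults are dequeued first */
		x &= SYNC_MASK;
	return __builtin_ctzl(x) + 1;	/* == ffz(~x) + 1 */
}

int main(void)
{
	unsigned long pending = (1UL << (SIGHUP - 1)) | (1UL << (SIGSEGV - 1));

	/* SIGSEGV (11) is picked before the numerically lower SIGHUP (1). */
	printf("next signal: %d\n", lowest_sig(pending, 0));
	return 0;
}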


static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
			current->comm, current->pid, sig);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Naohiro Ooiwa                     52  98.11%        1      50.00%
Wang Xiaoqiang                     1   1.89%        1      50.00%
Total                             53 100.00%        2     100.00%
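
DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10) allows a burst of 10 messages per 5-second interval. A rough userspace analogue of that policy, not the kernel's ratelimit implementation:

#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t window_start;
	int interval;	/* seconds per window */
	int burst;	/* messages allowed per window */
	int printed;
};

static int ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (now - rs->window_start >= rs->interval) {
		rs->window_start = now;		/* new window: reset the budget */
		rs->printed = 0;
	}
	return rs->printed++ < rs->burst;
}

int main(void)
{
	struct ratelimit rs = { time(NULL), 5, 10, 0 };
	int i, shown = 0;

	for (i = 0; i < 100; i++)
		if (ratelimit_ok(&rs))
			shown++;
	printf("printed %d of 100 in one window\n", shown);	/* 10 */
	return 0;
}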

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Tejun Heo                         88  98.88%        1      50.00%
Palmer Dabbelt                     1   1.12%        1      50.00%
Total                             89 100.00%        2     100.00%

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Tejun Heo                         40  90.91%        3      75.00%
Oleg Nesterov                      4   9.09%        1      25.00%
Total                             44 100.00%        4     100.00%

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Tejun Heo                         55  96.49%        5      71.43%
Oleg Nesterov                      1   1.75%        1      14.29%
Palmer Dabbelt                     1   1.75%        1      14.29%
Total                             57 100.00%        7     100.00%

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Tejun Heo                        101  96.19%        5      83.33%
Jamie Iles                         4   3.81%        1      16.67%
Total                            105 100.00%        6     100.00%

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Jim Houston                       33  22.15%        1       5.88%
Roland McGrath                    30  20.13%        1       5.88%
Linus Torvalds (pre-git)          21  14.09%        4      23.53%
Chris Wright                      17  11.41%        2      11.76%
Naohiro Ooiwa                     13   8.72%        1       5.88%
David Howells                     12   8.05%        3      17.65%
Linus Torvalds                    11   7.38%        2      11.76%
Thomas Gleixner                    7   4.70%        1       5.88%
Jiri Slaby                         4   2.68%        1       5.88%
Al Viro                            1   0.67%        1       5.88%
Total                            149 100.00%       17     100.00%
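
The RLIMIT_SIGPENDING accounting here is what userspace sees when sigqueue() starts failing with EAGAIN. A hedged demo that shrinks the limit and queues blocked real-time signals until the allocation above would be refused (exact counts vary per system):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { 8, 8 };		/* tiny pending-signal quota */
	union sigval v = { .sival_int = 0 };
	sigset_t set;
	int n = 0;

	setrlimit(RLIMIT_SIGPENDING, &rl);
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep the signals queued */

	while (sigqueue(getpid(), SIGRTMIN, v) == 0)
		n++;
	printf("queued %d signals, then: %s\n", n, strerror(errno));
	return 0;
}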


static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Jim Houston                       17  38.64%        1      16.67%
Chris Wright                      17  38.64%        1      16.67%
Linus Torvalds (pre-git)          10  22.73%        4      66.67%
Total                             44 100.00%        6     100.00%


void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Jim Houston                       60  92.31%        1      25.00%
Linus Torvalds (pre-git)           5   7.69%        3      75.00%
Total                             65 100.00%        4     100.00%

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Linus Torvalds                    22  34.92%        1      20.00%
Linus Torvalds (pre-git)          18  28.57%        1      20.00%
Oleg Nesterov                     18  28.57%        2      40.00%
David Howells                      5   7.94%        1      20.00%
Total                             63 100.00%        5     100.00%

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Oleg Nesterov                    118 100.00%        1     100.00%
Total                            118 100.00%        1     100.00%


void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Oleg Nesterov                     60 100.00%        1     100.00%
Total                             60 100.00%        1     100.00%

#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Oleg Nesterov                     46 100.00%        1     100.00%
Total                             46 100.00%        1     100.00%

/*
 * Flush all handlers for a task.
 */
void flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Linus Torvalds (pre-git)          81  81.00%        4      50.00%
Kees Cook                         12  12.00%        1      12.50%
Linus Torvalds                     6   6.00%        2      25.00%
Andrew Morton                      1   1.00%        1      12.50%
Total                            100 100.00%        8     100.00%
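
This runs at exec time, which yields the classic POSIX rule: caught signals are reset to SIG_DFL across execve() while SIG_IGN is preserved. A sketch that re-execs itself to show both cases (the argv-based re-exec is just a demo device, not part of the kernel API):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void h(int sig) { }

int main(int argc, char **argv)
{
	struct sigaction sa;

	if (argc == 1) {
		signal(SIGUSR1, h);		/* caught: will be reset */
		signal(SIGUSR2, SIG_IGN);	/* ignored: preserved */
		execl(argv[0], argv[0], "post-exec", (char *)NULL);
		return 1;			/* only reached if exec failed */
	}

	sigaction(SIGUSR1, NULL, &sa);
	printf("SIGUSR1: %s\n", sa.sa_handler == SIG_DFL ? "reset to SIG_DFL" : "still caught");
	sigaction(SIGUSR2, NULL, &sa);
	printf("SIGUSR2: %s\n", sa.sa_handler == SIG_IGN ? "still SIG_IGN" : "reset");
	return 0;
}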


int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

Contributors

Person                          Tokens    Prop  Commits  CommitProp
Masoud Asgharifard Sharbiani        31  49.21%        1      25.00%
Roland McGrath                      28  44.44%        1      25.00%
Tejun Heo                            3   4.76%        1      25.00%
Serge E. Hallyn                      1   1.59%        1      25.00%
Total                               63 100.00%        4     100.00%


static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Linus Torvalds (pre-git)          62  37.35%        3      30.00%
Jim Houston                       57  34.34%        1      10.00%
Eric W. Biedermann                29  17.47%        1      10.00%
Oleg Nesterov                     16   9.64%        3      30.00%
Linus Torvalds                     1   0.60%        1      10.00%
Randy Dunlap                       1   0.60%        1      10.00%
Total                            166 100.00%       10     100.00%
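
The "is there another siginfo for the same signal" scan is why classic signals coalesce while pending but real-time signals queue one entry each. A userspace illustration (the count of 4 is the typical Linux outcome, 1 coalesced + 3 queued):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t hits;
static void h(int sig) { hits++; }

int main(void)
{
	sigset_t set;
	int i;

	signal(SIGUSR1, h);
	signal(SIGRTMIN, h);
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; i < 3; i++) {
		kill(getpid(), SIGUSR1);	/* classic: coalesces to one */
		kill(getpid(), SIGRTMIN);	/* real-time: each one queues */
	}

	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* delivers everything pending */
	printf("deliveries: %d\n", (int)hits);	/* typically 4 */
	return 0;
}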


static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Linus Torvalds (pre-git)          32  62.75%        3      37.50%
Ingo Molnar                       10  19.61%        2      25.00%
Eric W. Biedermann                 6  11.76%        1      12.50%
Linus Torvalds                     2   3.92%        1      12.50%
Roland McGrath                     1   1.96%        1      12.50%
Total                             51 100.00%        8     100.00%

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Thomas Gleixner                   68  32.38%        2      11.11%
Ingo Molnar                       47  22.38%        2      11.11%
Roland McGrath                    39  18.57%        4      22.22%
Eric W. Biedermann                12   5.71%        1       5.56%
Nico Pitre                        10   4.76%        1       5.56%
George Anzinger                   10   4.76%        1       5.56%
Linus Torvalds                     8   3.81%        2      11.11%
Pavel Emelyanov                    7   3.33%        1       5.56%
Benjamin Herrenschmidt             5   2.38%        1       5.56%
Tejun Heo                          2   0.95%        1       5.56%
Oleg Nesterov                      1   0.48%        1       5.56%
Randy Dunlap                       1   0.48%        1       5.56%
Total                            210 100.00%       18     100.00%
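
The SIGALRM branch re-arms signal->real_timer at dequeue time, which is what keeps a periodic setitimer() firing without userspace re-arming it. A minimal userspace view of that behaviour:

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;
static void on_alrm(int sig) { ticks++; }

int main(void)
{
	struct itimerval it = {
		.it_interval = { 0, 100000 },	/* re-armed every 100 ms */
		.it_value    = { 0, 100000 },
	};

	signal(SIGALRM, on_alrm);
	setitimer(ITIMER_REAL, &it, NULL);

	while (ticks < 5)
		pause();			/* each SIGALRM restarts the timer */
	printf("got %d periodic SIGALRMs\n", (int)ticks);
	return 0;
}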

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

Contributors

Person                        Tokens    Prop  Commits  CommitProp
Ingo Molnar                       21  53.85%        2      33.33%
Linus Torvalds (pre-git)           8  20.51%        1      16.67%
Oleg Nesterov                      7  17.95%        1      16.67%
Linus Torvalds                     3   7.69%        2      33.33%
Total                             39 100.00%        6     100.00%

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *