Release 4.15 kernel/signal.c
/*
* linux/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
*
* 2003-06-02 Jim Houston - Concurrent Computer Corp.
* Changes to use preallocated sigqueue structures
* to allow signals to be sent reliably.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h" /* audit_signal_info() */
/*
* SLAB caches for signal bits.
*/
static struct kmem_cache *sigqueue_cachep;
int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
return t->sighand->action[sig - 1].sa.sa_handler;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 14 | 43.75% | 1 | 16.67% |
Roland McGrath | 7 | 21.88% | 1 | 16.67% |
Linus Torvalds | 7 | 21.88% | 1 | 16.67% |
Ingo Molnar | 2 | 6.25% | 2 | 33.33% |
Linus Torvalds (pre-git) | 2 | 6.25% | 1 | 16.67% |
Total | 32 | 100.00% | 6 | 100.00% |
static int sig_handler_ignored(void __user *handler, int sig)
{
/* Is it explicitly or implicitly ignored? */
return handler == SIG_IGN ||
(handler == SIG_DFL && sig_kernel_ignore(sig));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 17 | 54.84% | 1 | 50.00% |
Roland McGrath | 14 | 45.16% | 1 | 50.00% |
Total | 31 | 100.00% | 2 | 100.00% |
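Both cases are observable from user space: SIG_IGN ignores a signal explicitly, while SIG_DFL ignores it implicitly when the default action is to ignore (sig_kernel_ignore(), e.g. SIGCHLD). A minimal user-space sketch using only standard POSIX calls, not part of this file:
/* Sketch: explicit (SIG_IGN) vs. implicit (SIG_DFL + default-ignore) ignore.
 * Build: cc -o ignore_demo ignore_demo.c */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	signal(SIGUSR1, SIG_IGN);	/* explicitly ignored */
	raise(SIGUSR1);			/* discarded */

	signal(SIGCHLD, SIG_DFL);	/* default action of SIGCHLD is ignore */
	raise(SIGCHLD);			/* also discarded */

	puts("still alive: both signals were ignored");
	return 0;
}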
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
void __user *handler;
handler = sig_handler(t, sig);
if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
handler == SIG_DFL && !(force && sig_kernel_only(sig)))
return 1;
return sig_handler_ignored(handler, sig);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 49 | 71.01% | 3 | 50.00% |
Pavel Emelyanov | 12 | 17.39% | 1 | 16.67% |
Roland McGrath | 5 | 7.25% | 1 | 16.67% |
Sukadev Bhattiprolu | 3 | 4.35% | 1 | 16.67% |
Total | 69 | 100.00% | 6 | 100.00% |
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
/*
* Blocked signals are never ignored, since the
* signal handler may change by the time it is
* unblocked.
*/
if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
return 0;
/*
* Tracers may want to know about even ignored signals, unless the
* signal is SIGKILL, which can't be reported anyway but can be
* ignored by a SIGNAL_UNKILLABLE task.
*/
if (t->ptrace && sig != SIGKILL)
return 0;
return sig_task_ignored(t, sig, force);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 29 | 43.28% | 3 | 37.50% |
Linus Torvalds | 19 | 28.36% | 1 | 12.50% |
Roland McGrath | 17 | 25.37% | 2 | 25.00% |
Sukadev Bhattiprolu | 1 | 1.49% | 1 | 12.50% |
Pavel Emelyanov | 1 | 1.49% | 1 | 12.50% |
Total | 67 | 100.00% | 8 | 100.00% |
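The first check — blocked signals are never treated as ignored — has a user-visible consequence: a signal raised while blocked stays pending, and a handler installed afterwards still runs once the signal is unblocked. A user-space sketch (plain POSIX, not kernel code):
/* Sketch: a blocked signal is kept pending, so a handler installed while
 * it is pending still runs on unblock. Build: cc -o blocked_demo blocked_demo.c */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got;

static void handler(int sig)
{
	got = sig;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = handler };
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);				/* stays pending: blocked */

	sigemptyset(&sa.sa_mask);		/* disposition changes while pending */
	sigaction(SIGUSR1, &sa, NULL);

	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* handler runs here */
	printf("delivered signal %d after unblock\n", (int)got);
	return 0;
}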
/*
* Re-calculate pending state from the set of locally pending
* signals, globally pending signals, and blocked signals.
*/
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
unsigned long ready;
long i;
switch (_NSIG_WORDS) {
default:
for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
ready |= signal->sig[i] &~ blocked->sig[i];
break;
case 4: ready = signal->sig[3] &~ blocked->sig[3];
ready |= signal->sig[2] &~ blocked->sig[2];
ready |= signal->sig[1] &~ blocked->sig[1];
ready |= signal->sig[0] &~ blocked->sig[0];
break;
case 2: ready = signal->sig[1] &~ blocked->sig[1];
ready |= signal->sig[0] &~ blocked->sig[0];
break;
case 1: ready = signal->sig[0] &~ blocked->sig[0];
}
return ready != 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
William Lee Irwin III | 198 | 100.00% | 1 | 100.00% |
Total | 198 | 100.00% | 1 | 100.00% |
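Stripped of the unrolling, the test is a word-wise AND-NOT reduction; the switch merely avoids the loop for the common _NSIG_WORDS values. A standalone sketch of the same computation, where NWORDS is a stand-in for _NSIG_WORDS:
/* Sketch: a deliverable signal exists iff some bit is set in 'sig'
 * and clear in 'blocked'. */
#include <stdbool.h>
#include <stddef.h>

enum { NWORDS = 2 };	/* stand-in for _NSIG_WORDS */

static bool has_pending(const unsigned long *sig, const unsigned long *blocked)
{
	unsigned long ready = 0;
	size_t i;

	for (i = 0; i < NWORDS; i++)
		ready |= sig[i] & ~blocked[i];
	return ready != 0;
}

int main(void)
{
	unsigned long sig[NWORDS] = { 0x5, 0 };	/* signals 1 and 3 pending */
	unsigned long blk[NWORDS] = { 0x1, 0 };	/* signal 1 blocked */

	return has_pending(sig, blk) ? 0 : 1;	/* signal 3 is deliverable */
}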
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
{
if ((t->jobctl & JOBCTL_PENDING_MASK) ||
PENDING(&t->pending, &t->blocked) ||
PENDING(&t->signal->shared_pending, &t->blocked)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
return 1;
}
/*
* We must never clear the flag in another thread, or in current
* when it's possible the current syscall is returning -ERESTART*.
* So we don't clear it here; only callers who know they should clear it do so.
*/
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 37 | 56.92% | 2 | 20.00% |
Roland McGrath | 12 | 18.46% | 2 | 20.00% |
Linus Torvalds (pre-git) | 10 | 15.38% | 2 | 20.00% |
Tejun Heo | 5 | 7.69% | 3 | 30.00% |
Linus Torvalds | 1 | 1.54% | 1 | 10.00% |
Total | 65 | 100.00% | 10 | 100.00% |
/*
* After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
* This is superfluous when called on current; the wakeup is a harmless no-op.
*/
void recalc_sigpending_and_wake(struct task_struct *t)
{
if (recalc_sigpending_tsk(t))
signal_wake_up(t, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roland McGrath | 23 | 95.83% | 1 | 50.00% |
Ingo Molnar | 1 | 4.17% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
void recalc_sigpending(void)
{
if (!recalc_sigpending_tsk(current) && !freezing(current))
clear_thread_flag(TIF_SIGPENDING);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 11 | 42.31% | 2 | 40.00% |
Roland McGrath | 8 | 30.77% | 1 | 20.00% |
Rafael J. Wysocki | 6 | 23.08% | 1 | 20.00% |
Linus Torvalds (pre-git) | 1 | 3.85% | 1 | 20.00% |
Total | 26 | 100.00% | 5 | 100.00% |
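The pending state that recalc_sigpending() maintains is what sigpending(2) reports to user space. A short sketch that parks a blocked signal in the pending set and reads it back:
/* Sketch: a blocked, undelivered signal shows up in sigpending(2).
 * Build: cc -o pending_demo pending_demo.c */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);		/* blocked, so it stays pending */

	sigpending(&pend);
	printf("SIGUSR1 pending: %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}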
/* Given the mask, find the first available signal that should be serviced. */
#define SYNCHRONOUS_MASK \
(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
int next_signal(struct sigpending *pending, sigset_t *mask)
{
unsigned long i, *s, *m, x;
int sig = 0;
s = pending->signal.sig;
m = mask->sig;
/*
* Handle the first word specially: it contains the
* synchronous signals that need to be dequeued first.
*/
x = *s &~ *m;
if (x) {
if (x & SYNCHRONOUS_MASK)
x &= SYNCHRONOUS_MASK;
sig = ffz(~x) + 1;
return sig;
}
switch (_NSIG_WORDS) {
default:
for (i = 1; i < _NSIG_WORDS; ++i) {
x = *++s &~ *++m;
if (!x)
continue;
sig = ffz(~x) + i*_NSIG_BPW + 1;
break;
}
break;
case 2:
x = s[1] &~ m[1];
if (!x)
break;
sig = ffz(~x) + _NSIG_BPW + 1;
break;
case 1:
/* Nothing to do */
break;
}
return sig;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 74 | 40.66% | 2 | 40.00% |
Linus Torvalds | 58 | 31.87% | 1 | 20.00% |
Ingo Molnar | 49 | 26.92% | 1 | 20.00% |
Davide Libenzi | 1 | 0.55% | 1 | 20.00% |
Total | 182 | 100.00% | 5 | 100.00% |
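`ffz(~x) + 1` is "index of the lowest set bit, plus one", turning a 0-based bit position into a 1-based signal number; narrowing to SYNCHRONOUS_MASK first lets a fault signal win even over a lower-numbered asynchronous one. A user-space sketch with the GCC/Clang ctz builtin standing in for ffz():
/* Sketch of next_signal()'s first-word logic, with __builtin_ctzl()
 * standing in for the kernel's ffz(~x).
 * Build: cc -o next_demo next_demo.c */
#include <signal.h>
#include <stdio.h>

#define BIT(sig)	(1UL << ((sig) - 1))
#define SYNC_MASK	(BIT(SIGSEGV) | BIT(SIGBUS) | BIT(SIGILL) | \
			 BIT(SIGTRAP) | BIT(SIGFPE) | BIT(SIGSYS))

static int lowest_signal(unsigned long x)
{
	if (x & SYNC_MASK)	/* synchronous signals are dequeued first */
		x &= SYNC_MASK;
	return x ? __builtin_ctzl(x) + 1 : 0;	/* == ffz(~x) + 1 */
}

int main(void)
{
	unsigned long pending = BIT(SIGHUP) | BIT(SIGSEGV);

	/* SIGHUP is lower-numbered, yet SIGSEGV is returned: */
	printf("next signal: %d (SIGSEGV = %d)\n",
	       lowest_signal(pending), SIGSEGV);
	return 0;
}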
static inline void print_dropped_signal(int sig)
{
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
if (!print_fatal_signals)
return;
if (!__ratelimit(&ratelimit_state))
return;
pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
current->comm, current->pid, sig);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Naohiro Ooiwa | 52 | 98.11% | 1 | 50.00% |
Wang Xiaoqiang | 1 | 1.89% | 1 | 50.00% |
Total | 53 | 100.00% | 2 | 100.00% |
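DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10) permits at most 10 messages per 5-second interval. A hedged user-space sketch of the same interval/burst policy; the struct and helper here are illustrative, not the kernel API:
/* Sketch: fixed-window ratelimit, at most 'burst' events per 'interval'
 * seconds, mirroring __ratelimit()'s policy in spirit only. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t begin;		/* start of the current interval */
	int printed;		/* events allowed so far in this interval */
	int interval;		/* seconds */
	int burst;		/* max events per interval */
};

static bool ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (now - rs->begin >= rs->interval) {	/* new interval: reset */
		rs->begin = now;
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return true;
	}
	return false;		/* caller drops the message */
}

int main(void)
{
	struct ratelimit rs = { .interval = 5, .burst = 10 };
	int shown = 0, i;

	for (i = 0; i < 100; i++)
		if (ratelimit_ok(&rs))
			shown++;
	printf("%d of 100 events passed the limiter\n", shown);	/* 10 */
	return 0;
}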
/**
* task_set_jobctl_pending - set jobctl pending bits
* @task: target task
* @mask: pending bits to set
*
* Set @mask on @task->jobctl. @mask must be a subset of
* %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
* %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
* cleared. If @task is already being killed or exiting, this function
* becomes a no-op.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*
* RETURNS:
* %true if @mask is set, %false if it became a no-op because @task was dying.
*/
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
return false;
if (mask & JOBCTL_STOP_SIGMASK)
task->jobctl &= ~JOBCTL_STOP_SIGMASK;
task->jobctl |= mask;
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 88 | 98.88% | 1 | 50.00% |
Palmer Dabbelt | 1 | 1.12% | 1 | 50.00% |
Total | 89 | 100.00% | 2 | 100.00% |
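JOBCTL_STOP_SIGMASK reserves the low bits of ->jobctl for the stop signal number, which is why the function clears that field before OR-ing in @mask. A sketch of the same packing trick; the constants here are illustrative, not the kernel's JOBCTL_* values:
/* Sketch: packing a signal number into the low bits of a flags word, as
 * ->jobctl does with JOBCTL_STOP_SIGMASK. Constants are illustrative. */
#include <stdio.h>

#define STOP_SIGMASK	0xffffUL	/* low bits hold the stop signo */
#define STOP_PENDING	(1UL << 16)	/* illustrative flag bit */

static unsigned long set_stop(unsigned long jobctl, int signo)
{
	jobctl &= ~STOP_SIGMASK;	/* drop any previously stored signo */
	return jobctl | STOP_PENDING | (unsigned long)signo;
}

int main(void)
{
	unsigned long jobctl = set_stop(0, 20 /* SIGTSTP on x86 */);

	printf("pending=%d signo=%lu\n",
	       !!(jobctl & STOP_PENDING), jobctl & STOP_SIGMASK);
	return 0;
}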
/**
* task_clear_jobctl_trapping - clear jobctl trapping bit
* @task: target task
*
* If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
* Clear it and wake up the ptracer. Note that we don't need any further
* locking; @task->sighand->siglock guarantees that @task->parent points to the
* ptracer.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
void task_clear_jobctl_trapping(struct task_struct *task)
{
if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
task->jobctl &= ~JOBCTL_TRAPPING;
smp_mb(); /* advised by wake_up_bit() */
wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 40 | 90.91% | 3 | 75.00% |
Oleg Nesterov | 4 | 9.09% | 1 | 25.00% |
Total | 44 | 100.00% | 4 | 100.00% |
/**
* task_clear_jobctl_pending - clear jobctl pending bits
* @task: target task
* @mask: pending bits to clear
*
* Clear @mask from @task->jobctl. @mask must be subset of
* %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
* STOP bits are cleared together.
*
* If clearing of @mask leaves no stop or trap pending, this function calls
* task_clear_jobctl_trapping().
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
BUG_ON(mask & ~JOBCTL_PENDING_MASK);
if (mask & JOBCTL_STOP_PENDING)
mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
task->jobctl &= ~mask;
if (!(task->jobctl & JOBCTL_PENDING_MASK))
task_clear_jobctl_trapping(task);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 55 | 96.49% | 5 | 71.43% |
Oleg Nesterov | 1 | 1.75% | 1 | 14.29% |
Palmer Dabbelt | 1 | 1.75% | 1 | 14.29% |
Total | 57 | 100.00% | 7 | 100.00% |
/**
* task_participate_group_stop - participate in a group stop
* @task: task participating in a group stop
*
* @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
* Group stop states are cleared and the group stop count is consumed if
* %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
* stop, the appropriate %SIGNAL_* flags are set.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*
* RETURNS:
* %true if group stop completion should be notified to the parent, %false
* otherwise.
*/
static bool task_participate_group_stop(struct task_struct *task)
{
struct signal_struct *sig = task->signal;
bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
if (!consume)
return false;
if (!WARN_ON_ONCE(sig->group_stop_count == 0))
sig->group_stop_count--;
/*
* Tell the caller to notify completion iff we are entering into a
* fresh group stop. Read comment in do_signal_stop() for details.
*/
if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 101 | 96.19% | 5 | 83.33% |
Jamie Iles | 4 | 3.81% | 1 | 16.67% |
Total | 105 | 100.00% | 6 | 100.00% |
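From the parent's point of view, "notify completion to the parent" surfaces as a wait status with WIFSTOPPED set. A user-space sketch of a (one-task) group stop observed via waitpid(2):
/* Sketch: a group stop completing, observed by the parent via waitpid().
 * Build: cc -o stop_demo stop_demo.c */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)				/* child: idle forever */
		for (;;)
			pause();

	kill(pid, SIGSTOP);			/* initiate the group stop */
	waitpid(pid, &status, WUNTRACED);	/* reports stop completion */
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGKILL);			/* clean up and reap */
	waitpid(pid, &status, 0);
	return 0;
}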
/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
* appropriate lock must be held to stop the target task from exiting
*/
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
struct sigqueue *q = NULL;
struct user_struct *user;
/*
* Protect access to @t credentials. This can go away when all
* callers hold rcu read lock.
*/
rcu_read_lock();
user = get_uid(__task_cred(t)->user);
atomic_inc(&user->sigpending);
rcu_read_unlock();
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
q = kmem_cache_alloc(sigqueue_cachep, flags);
} else {
print_dropped_signal(sig);
}
if (unlikely(q == NULL)) {
atomic_dec(&user->sigpending);
free_uid(user);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = 0;
q->user = user;
}
return q;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jim Houston | 33 | 22.15% | 1 | 5.88% |
Roland McGrath | 30 | 20.13% | 1 | 5.88% |
Linus Torvalds (pre-git) | 21 | 14.09% | 4 | 23.53% |
Chris Wright | 17 | 11.41% | 2 | 11.76% |
Naohiro Ooiwa | 13 | 8.72% | 1 | 5.88% |
David Howells | 12 | 8.05% | 3 | 17.65% |
Linus Torvalds | 11 | 7.38% | 2 | 11.76% |
Thomas Gleixner | 7 | 4.70% | 1 | 5.88% |
Jiri Slaby | 4 | 2.68% | 1 | 5.88% |
Al Viro | 1 | 0.67% | 1 | 5.88% |
Total | 149 | 100.00% | 17 | 100.00% |
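The limit consulted here is per-user and is exposed to user space as RLIMIT_SIGPENDING. A trivial sketch that reads the limit __sigqueue_alloc() enforces:
/* Sketch: read the per-user queued-signal limit that __sigqueue_alloc()
 * enforces. Build: cc -o rlim_demo rlim_demo.c */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_SIGPENDING, &rl) == 0)
		printf("queued signals: soft=%llu hard=%llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);
	return 0;
}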
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
atomic_dec(&q->user->sigpending);
free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jim Houston | 17 | 38.64% | 1 | 16.67% |
Chris Wright | 17 | 38.64% | 1 | 16.67% |
Linus Torvalds (pre-git) | 10 | 22.73% | 4 | 66.67% |
Total | 44 | 100.00% | 6 | 100.00% |
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
sigemptyset(&queue->signal);
while (!list_empty(&queue->list)) {
q = list_entry(queue->list.next, struct sigqueue, list);
list_del_init(&q->list);
__sigqueue_free(q);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jim Houston | 60 | 92.31% | 1 | 25.00% |
Linus Torvalds (pre-git) | 5 | 7.69% | 3 | 75.00% |
Total | 65 | 100.00% | 4 | 100.00% |
/*
* Flush all pending signals for this kthread.
*/
void flush_signals(struct task_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&t->sighand->siglock, flags);
clear_tsk_thread_flag(t, TIF_SIGPENDING);
flush_sigqueue(&t->pending);
flush_sigqueue(&t->signal->shared_pending);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 22 | 34.92% | 1 | 20.00% |
Linus Torvalds (pre-git) | 18 | 28.57% | 1 | 20.00% |
Oleg Nesterov | 18 | 28.57% | 2 | 40.00% |
David Howells | 5 | 7.94% | 1 | 20.00% |
Total | 63 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
sigset_t signal, retain;
struct sigqueue *q, *n;
signal = pending->signal;
sigemptyset(&retain);
list_for_each_entry_safe(q, n, &pending->list, list) {
int sig = q->info.si_signo;
if (likely(q->info.si_code != SI_TIMER)) {
sigaddset(&retain, sig);
} else {
sigdelset(&signal, sig);
list_del_init(&q->list);
__sigqueue_free(q);
}
}
sigorsets(&pending->signal, &signal, &retain);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 118 | 100.00% | 1 | 100.00% |
Total | 118 | 100.00% | 1 | 100.00% |
void flush_itimer_signals(void)
{
struct task_struct *tsk = current;
unsigned long flags;
spin_lock_irqsave(&tsk->sighand->siglock, flags);
__flush_itimer_signals(&tsk->pending);
__flush_itimer_signals(&tsk->signal->shared_pending);
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
#endif
void ignore_signals(struct task_struct *t)
{
int i;
for (i = 0; i < _NSIG; ++i)
t->sighand->action[i].sa.sa_handler = SIG_IGN;
flush_signals(t);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 46 | 100.00% | 1 | 100.00% |
Total | 46 | 100.00% | 1 | 100.00% |
/*
* Flush all handlers for a task.
*/
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
int i;
struct k_sigaction *ka = &t->sighand->action[0];
for (i = _NSIG ; i != 0 ; i--) {
if (force_default || ka->sa.sa_handler != SIG_IGN)
ka->sa.sa_handler = SIG_DFL;
ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
ka->sa.sa_restorer = NULL;
#endif
sigemptyset(&ka->sa.sa_mask);
ka++;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 81 | 81.00% | 4 | 50.00% |
Kees Cook | 12 | 12.00% | 1 | 12.50% |
Linus Torvalds | 6 | 6.00% | 2 | 25.00% |
Andrew Morton | 1 | 1.00% | 1 | 12.50% |
Total | 100 | 100.00% | 8 | 100.00% |
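A user-space loop over sigaction(2) mirrors this reset, including the keep-SIG_IGN-unless-forced rule (execve applies the same policy: ignored dispositions survive, caught ones revert to default). A sketch, assuming glibc's NSIG:
/* Sketch: reset caught dispositions to SIG_DFL, preserving SIG_IGN,
 * mirroring flush_signal_handlers(t, 0) from user space. */
#include <signal.h>

static void flush_handlers(int force_default)
{
	struct sigaction sa;
	int sig;

	for (sig = 1; sig < NSIG; sig++) {
		if (sigaction(sig, NULL, &sa) < 0)
			continue;	/* e.g. invalid signal number */
		if (force_default || sa.sa_handler != SIG_IGN)
			sa.sa_handler = SIG_DFL;
		sa.sa_flags = 0;
		sigemptyset(&sa.sa_mask);
		sigaction(sig, &sa, NULL);	/* fails for SIGKILL/SIGSTOP */
	}
}

int main(void)
{
	flush_handlers(0);
	return 0;
}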
int unhandled_signal(struct task_struct *tsk, int sig)
{
void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
if (is_global_init(tsk))
return 1;
if (handler != SIG_IGN && handler != SIG_DFL)
return 0;
/* if ptraced, let the tracer determine */
return !tsk->ptrace;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masoud Asgharifard Sharbiani | 31 | 49.21% | 1 | 25.00% |
Roland McGrath | 28 | 44.44% | 1 | 25.00% |
Tejun Heo | 3 | 4.76% | 1 | 25.00% |
Serge E. Hallyn | 1 | 1.59% | 1 | 25.00% |
Total | 63 | 100.00% | 4 | 100.00% |
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
bool *resched_timer)
{
struct sigqueue *q, *first = NULL;
/*
* Collect the siginfo appropriate to this signal. Check if
* there is another siginfo for the same signal.
*/
list_for_each_entry(q, &list->list, list) {
if (q->info.si_signo == sig) {
if (first)
goto still_pending;
first = q;
}
}
sigdelset(&list->signal, sig);
if (first) {
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
*resched_timer =
(first->flags & SIGQUEUE_PREALLOC) &&
(info->si_code == SI_TIMER) &&
(info->si_sys_private);
__sigqueue_free(first);
} else {
/*
* Ok, it wasn't in the queue. This must be
* a fast-pathed signal or we must have been
* out of queue space. So zero out the info.
*/
info->si_signo = sig;
info->si_errno = 0;
info->si_code = SI_USER;
info->si_pid = 0;
info->si_uid = 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 62 | 37.35% | 3 | 30.00% |
Jim Houston | 57 | 34.34% | 1 | 10.00% |
Eric W. Biederman | 29 | 17.47% | 1 | 10.00% |
Oleg Nesterov | 16 | 9.64% | 3 | 30.00% |
Linus Torvalds | 1 | 0.60% | 1 | 10.00% |
Randy Dunlap | 1 | 0.60% | 1 | 10.00% |
Total | 166 | 100.00% | 10 | 100.00% |
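The zero-filled else branch covers signals pending in the bitmask with no queued entry, which is also why legacy signals coalesce while realtime signals queue one entry per send. That difference is observable from user space; a sketch:
/* Sketch: legacy signals coalesce, realtime signals queue.
 * Build: cc -o queue_demo queue_demo.c */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int drain(int sig)
{
	struct timespec zero = { 0, 0 };
	sigset_t set;
	int n = 0;

	sigemptyset(&set);
	sigaddset(&set, sig);
	while (sigtimedwait(&set, NULL, &zero) == sig)
		n++;
	return n;
}

int main(void)
{
	union sigval v = { .sival_int = 0 };
	sigset_t set;
	int i;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; i < 3; i++) {
		kill(getpid(), SIGUSR1);		/* legacy: coalesces */
		sigqueue(getpid(), SIGRTMIN, v);	/* realtime: queues */
	}
	printf("SIGUSR1 delivered %d time(s), SIGRTMIN %d time(s)\n",
	       drain(SIGUSR1), drain(SIGRTMIN));	/* 1 and 3 */
	return 0;
}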
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
siginfo_t *info, bool *resched_timer)
{
int sig = next_signal(pending, mask);
if (sig)
collect_signal(sig, pending, info, resched_timer);
return sig;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 32 | 62.75% | 3 | 37.50% |
Ingo Molnar | 10 | 19.61% | 2 | 25.00% |
Eric W. Biederman | 6 | 11.76% | 1 | 12.50% |
Linus Torvalds | 2 | 3.92% | 1 | 12.50% |
Roland McGrath | 1 | 1.96% | 1 | 12.50% |
Total | 51 | 100.00% | 8 | 100.00% |
/*
* Dequeue a signal and return the element to the caller, which is
* expected to free it.
*
* All callers have to hold the siglock.
*/
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves; we don't let
* signalfd steal them.
*/
signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
/*
* itimer signal ?
*
* itimers are process shared and we restart periodic
* itimers in the signal delivery path to prevent DoS
* attacks in the high resolution timer case. This is
* compliant with the old way of self-restarting
* itimers, as the SIGALRM is a legacy signal and only
* queued once. Restarting the timer in the signal dequeue
* path also reduces timer noise on heavily loaded !highres
* systems.
*/
if (unlikely(signr == SIGALRM)) {
struct hrtimer *tmr = &tsk->signal->real_timer;
if (!hrtimer_is_queued(tmr) &&
tsk->signal->it_real_incr != 0) {
hrtimer_forward(tmr, tmr->base->get_time(),
tsk->signal->it_real_incr);
hrtimer_restart(tmr);
}
}
#endif
}
recalc_sigpending();
if (!signr)
return 0;
if (unlikely(sig_kernel_stop(signr))) {
/*
* Set a marker that we have dequeued a stop signal. Our
* caller might release the siglock and then the pending
* stop signal it is about to process is no longer in the
* pending bitmasks, but must still be cleared by a SIGCONT
* (and overruled by a SIGKILL). So those cases clear this
* shared flag after we've set it. Note that this flag may
* remain set after the signal we return is ignored or
* handled. That doesn't matter because its only purpose
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
#ifdef CONFIG_POSIX_TIMERS
if (resched_timer) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
* irqs disabled here, since the posix-timers code is
* about to disable them again anyway.
*/
spin_unlock(&tsk->sighand->siglock);
posixtimer_rearm(info);
spin_lock(&tsk->sighand->siglock);
}
#endif
return signr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 68 | 32.38% | 2 | 11.11% |
Ingo Molnar | 47 | 22.38% | 2 | 11.11% |
Roland McGrath | 39 | 18.57% | 4 | 22.22% |
Eric W. Biederman | 12 | 5.71% | 1 | 5.56% |
Nicolas Pitre | 10 | 4.76% | 1 | 5.56% |
George Anzinger | 10 | 4.76% | 1 | 5.56% |
Linus Torvalds | 8 | 3.81% | 2 | 11.11% |
Pavel Emelyanov | 7 | 3.33% | 1 | 5.56% |
Benjamin Herrenschmidt | 5 | 2.38% | 1 | 5.56% |
Tejun Heo | 2 | 0.95% | 1 | 5.56% |
Oleg Nesterov | 1 | 0.48% | 1 | 5.56% |
Randy Dunlap | 1 | 0.48% | 1 | 5.56% |
Total | 210 | 100.00% | 18 | 100.00% |
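The SIGALRM rearm above is what keeps a periodic setitimer(2) firing even when delivery is deferred: with SIGALRM blocked, each dequeue re-arms the timer. A user-space sketch that collects a few periodic ticks via sigwait(3):
/* Sketch: periodic ITIMER_REAL, the self-restarting SIGALRM source the
 * code above re-arms. Build: cc -o itimer_demo itimer_demo.c */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct itimerval it = {
		.it_interval = { 0, 100000 },	/* 100ms period */
		.it_value = { 0, 100000 },	/* first expiry */
	};
	sigset_t set;
	int i, sig;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* collect via sigwait() */
	setitimer(ITIMER_REAL, &it, NULL);

	for (i = 0; i < 3; i++) {
		sigwait(&set, &sig);
		printf("tick %d: signal %d\n", i + 1, sig);
	}
	return 0;
}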
/*
* Tell a process that it has a new active signal.
*
* NOTE! we rely on the previous spin_lock to
* lock interrupts for us! We can only be called with
* "siglock" held, and the local interrupt must
* have been disabled when that got acquired!
*
* No need to set need_resched since signal event passing
* goes through ->blocked
*/
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
set_tsk_thread_flag(t, TIF_SIGPENDING);
/*
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable
* case. We don't check t->state here because there is a race with it
* executing on another processor and just now entering the stopped state.
* By using wake_up_state, we ensure the process will wake up and
* handle its death signal.
*/
if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
kick_process(t);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 21 | 53.85% | 2 | 33.33% |
Linus Torvalds (pre-git) | 8 | 20.51% | 1 | 16.67% |
Oleg Nesterov | 7 | 17.95% | 1 | 16.67% |
Linus Torvalds | 3 | 7.69% | 2 | 33.33% |
Total | 39 | 100.00% | 6 | 100.00% |
/*
* Remove signals in mask from the pending set and queue.
* Returns 1 if any signals were found.
*
* All callers must be holding the siglock.
*/
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *