Release 4.15 kernel/softirq.c
/*
* linux/kernel/softirq.c
*
* Copyright (C) 1992 Linus Torvalds
*
* Distribute under GPLv2.
*
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
by its own spinlocks.
- Even if softirq is serialized, only local cpu is marked for
execution. Hence, we get something sort of weak cpu binding.
Though it is still not clear whether this will result in better
locality or not.
Examples:
- NET RX softirq. It is multithreaded and does not require
any global serialization.
- NET TX softirq. It kicks software netdevice queues, hence
it is logically serialized per device, but this serialization
is invisible to common code.
- Tasklets: serialized wrt themselves.
*/
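As a hedged illustration of the serialization rule above (not part of this file): a softirq handler that must touch state shared across CPUs takes its own spinlock, since the softirq core provides no cross-CPU ordering. All names below (my_item, my_lock, my_queue, my_softirq_action) are hypothetical.
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_item {
	struct list_head node;
	/* payload */
};

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_queue);

static void my_softirq_action(struct softirq_action *h)
{
	/* Handlers run with interrupts enabled; process-context code that
	 * also takes my_lock must use spin_lock_bh() to keep this handler out. */
	spin_lock(&my_lock);
	while (!list_empty(&my_queue)) {
		struct my_item *it = list_first_entry(&my_queue,
						      struct my_item, node);
		list_del(&it->node);
		/* handle *it here, dropping the lock around anything slow */
	}
	spin_unlock(&my_lock);
}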
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
* We cannot loop indefinitely here, in order to avoid userspace starvation;
* on the other hand, we don't want to introduce a worst-case 1/HZ latency
* for the pending events, so we let the scheduler balance the softirq
* load for us.
*/
static void wakeup_softirqd(void)
{
/* Interrupts are disabled: no need to stop preemption */
struct task_struct *tsk = __this_cpu_read(ksoftirqd);
if (tsk && tsk->state != TASK_RUNNING)
wake_up_process(tsk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 29 | 85.29% | 1 | 25.00% |
Rusty Russell | 3 | 8.82% | 1 | 25.00% |
Christoph Lameter | 1 | 2.94% | 1 | 25.00% |
Thomas Gleixner | 1 | 2.94% | 1 | 25.00% |
Total | 34 | 100.00% | 4 | 100.00% |
/*
* If ksoftirqd is scheduled, we do not want to process pending softirqs
* right now. Let ksoftirqd handle this at its own rate, to get fairness.
*/
static bool ksoftirqd_running(void)
{
struct task_struct *tsk = __this_cpu_read(ksoftirqd);
return tsk && (tsk->state == TASK_RUNNING);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
* softirq processing.
* - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
* on local_bh_disable or local_bh_enable.
* This lets us distinguish between whether we are currently processing
* softirq and whether we just have bh disabled.
*/
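A hedged worked example of the bookkeeping above (user-space, not part of this file), using the preempt_count layout from <linux/preempt.h> in this release, where the SOFTIRQ field occupies bits 8-15:
#include <stdio.h>

#define SOFTIRQ_SHIFT		8
#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)	  /* 0x100: entered softirq processing */
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)	  /* 0x200: one local_bh_disable() */
#define SOFTIRQ_MASK		(0xffUL << SOFTIRQ_SHIFT) /* bits 8-15 */

int main(void)
{
	unsigned long pc = 0;			/* stand-in for preempt_count() */

	pc += SOFTIRQ_OFFSET;			/* __do_softirq() enters softirq processing */
	pc += SOFTIRQ_DISABLE_OFFSET;		/* a handler nests local_bh_disable() */

	printf("softirq_count   = %#lx\n", pc & SOFTIRQ_MASK);		/* 0x300: bh is off    */
	printf("serving softirq = %d\n", (pc & SOFTIRQ_OFFSET) != 0);	/* 1: inside a handler */
	return 0;
}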
/*
* This one is for softirq.c-internal use,
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
WARN_ON_ONCE(in_irq());
raw_local_irq_save(flags);
/*
* The preempt tracer hooks into preempt_count_add and will break
* lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
* is set and before current->softirq_enabled is cleared.
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
__preempt_count_add(cnt);
/*
* Were softirqs turned off above:
*/
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
current->preempt_disable_ip = get_lock_parent_ip();
#endif
trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 41 | 48.24% | 1 | 11.11% |
Steven Rostedt | 12 | 14.12% | 1 | 11.11% |
Heiko Carstens | 12 | 14.12% | 1 | 11.11% |
Peter Zijlstra | 9 | 10.59% | 4 | 44.44% |
Venkatesh Pallipadi | 7 | 8.24% | 1 | 11.11% |
Sebastian Andrzej Siewior | 4 | 4.71% | 1 | 11.11% |
Total | 85 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
lockdep_assert_irqs_disabled();
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
preempt_count_sub(cnt);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 17 | 50.00% | 1 | 16.67% |
Venkatesh Pallipadi | 10 | 29.41% | 1 | 16.67% |
Peter Zijlstra | 5 | 14.71% | 2 | 33.33% |
Davidlohr Bueso A | 1 | 2.94% | 1 | 16.67% |
Frédéric Weisbecker | 1 | 2.94% | 1 | 16.67% |
Total | 34 | 100.00% | 6 | 100.00% |
/*
* Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(),
* without processing still-pending softirqs:
*/
void _local_bh_enable(void)
{
WARN_ON_ONCE(in_irq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Venkatesh Pallipadi | 9 | 50.00% | 1 | 33.33% |
Frédéric Weisbecker | 6 | 33.33% | 1 | 33.33% |
Ingo Molnar | 3 | 16.67% | 1 | 33.33% |
Total | 18 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
WARN_ON_ONCE(in_irq());
lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_disable();
#endif
/*
* Are softirqs going to be turned on now:
*/
if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
trace_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
*/
preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) {
/*
* Run softirqs if any are pending, and do it on a separate stack,
* as we may already be deep in a task's call stack.
*/
do_softirq();
}
preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_enable();
#endif
preempt_check_resched();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 49 | 59.04% | 1 | 11.11% |
Tim Chen | 10 | 12.05% | 1 | 11.11% |
Peter Zijlstra | 8 | 9.64% | 2 | 22.22% |
Johannes Berg | 8 | 9.64% | 1 | 11.11% |
Frédéric Weisbecker | 6 | 7.23% | 2 | 22.22% |
Joe Perches | 1 | 1.20% | 1 | 11.11% |
Venkatesh Pallipadi | 1 | 1.20% | 1 | 11.11% |
Total | 83 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(__local_bh_enable_ip);
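A hedged usage sketch of the pair above (not part of this file): process-context code that shares per-CPU state with a softirq handler brackets its update with local_bh_disable()/local_bh_enable(); the enable side funnels into __local_bh_enable_ip() and runs any softirqs that became pending in between. The per-CPU counter and function name are hypothetical.
#include <linux/bottom_half.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_rx_packets);

static void my_count_packet(void)
{
	local_bh_disable();			/* keep the softirq handler off this CPU */
	__this_cpu_inc(my_rx_packets);		/* safe against the handler while bh is off */
	local_bh_enable();			/* runs softirqs raised in the meantime */
}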
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
* but break the loop if need_resched() is set or after 2 ms.
* The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
* certain cases, such as stop_machine(), jiffies may cease to
* increment and so we need the MAX_SOFTIRQ_RESTART limit as
* well to make sure we eventually return from this method.
*
* These limits have been established via experimentation.
* The two things to balance are latency and fairness:
* we want to handle softirqs as soon as possible, but they
* should not be able to lock up the box.
*/
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* When we run softirqs from irq_exit() and thus on the hardirq stack we need
* to keep the lockdep irq context tracking as tight as possible in order to
* not mis-qualify lock contexts and miss possible deadlocks.
*/
static inline bool lockdep_softirq_start(void)
{
bool in_hardirq = false;
if (trace_hardirq_context(current)) {
in_hardirq = true;
trace_hardirq_exit();
}
lockdep_softirq_enter();
return in_hardirq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 52.78% | 1 | 50.00% |
Frédéric Weisbecker | 17 | 47.22% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
static inline void lockdep_softirq_end(bool in_hardirq)
{
lockdep_softirq_exit();
if (in_hardirq)
trace_hardirq_enter();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 17 | 85.00% | 1 | 50.00% |
Frédéric Weisbecker | 3 | 15.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
#else
static inline bool lockdep_softirq_start(void) { return false; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 6 | 50.00% | 1 | 50.00% |
Frédéric Weisbecker | 6 | 50.00% | 1 | 50.00% |
Total | 12 | 100.00% | 2 | 100.00% |
static inline void lockdep_softirq_end(bool in_hardirq) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 7 | 77.78% | 1 | 50.00% |
Frédéric Weisbecker | 2 | 22.22% | 1 | 50.00% |
Total | 9 | 100.00% | 2 | 100.00% |
#endif
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
bool in_hardirq;
__u32 pending;
int softirq_bit;
/*
* Mask out PF_MEMALLOC, as the current task context is borrowed for the
* softirq. A softirq handler such as network RX might set PF_MEMALLOC
* again if the socket is related to swap.
*/
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
account_irq_enter_time(current);
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
in_hardirq = lockdep_softirq_start();
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
local_irq_enable();
h = softirq_vec;
while ((softirq_bit = ffs(pending))) {
unsigned int vec_nr;
int prev_count;
h += softirq_bit - 1;
vec_nr = h - softirq_vec;
prev_count = preempt_count();
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
h->action(h);
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
vec_nr, softirq_to_name[vec_nr], h->action,
prev_count, preempt_count());
preempt_count_set(prev_count);
}
h++;
pending >>= softirq_bit;
}
rcu_bh_qs();
local_irq_disable();
pending = local_softirq_pending();
if (pending) {
if (time_before(jiffies, end) && !need_resched() &&
--max_restart)
goto restart;
wakeup_softirqd();
}
lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
current_restore_flags(old_flags, PF_MEMALLOC);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 41 | 15.71% | 2 | 5.13% |
Linus Torvalds (pre-git) | 34 | 13.03% | 5 | 12.82% |
Joe Perches | 28 | 10.73% | 2 | 5.13% |
Eric Dumazet | 23 | 8.81% | 2 | 5.13% |
Mel Gorman | 22 | 8.43% | 1 | 2.56% |
Linus Torvalds | 16 | 6.13% | 3 | 7.69% |
Frédéric Weisbecker | 16 | 6.13% | 3 | 7.69% |
Peter Zijlstra | 15 | 5.75% | 3 | 7.69% |
Jason Baron | 12 | 4.60% | 2 | 5.13% |
Andrew Morton | 10 | 3.83% | 3 | 7.69% |
Paul Mackerras | 8 | 3.07% | 1 | 2.56% |
Ben Greear | 8 | 3.07% | 1 | 2.56% |
Venkatesh Pallipadi | 6 | 2.30% | 1 | 2.56% |
David S. Miller | 5 | 1.92% | 1 | 2.56% |
Ingo Molnar | 5 | 1.92% | 2 | 5.13% |
Andi Kleen | 4 | 1.53% | 2 | 5.13% |
Keika Kobayashi | 4 | 1.53% | 1 | 2.56% |
Davidlohr Bueso A | 1 | 0.38% | 1 | 2.56% |
Alexander Potapenko | 1 | 0.38% | 1 | 2.56% |
Dipankar Sarma | 1 | 0.38% | 1 | 2.56% |
Neil Brown | 1 | 0.38% | 1 | 2.56% |
Total | 261 | 100.00% | 39 | 100.00% |
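A hedged user-space model (not part of this file) of the ffs()-driven scan in __do_softirq() above: each iteration jumps straight to the next set bit of the pending mask instead of testing every vector.
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int pending = (1u << 1) | (1u << 3) | (1u << 9);	/* arbitrary set bits */
	unsigned int base = 0;						/* mirrors the h pointer */
	int bit;

	while ((bit = ffs(pending))) {
		printf("run vector %u\n", base + bit - 1);	/* like vec_nr = h - softirq_vec */
		base += bit;					/* h += softirq_bit - 1; ... h++ */
		pending >>= bit;				/* drop bits up to and including this one */
	}
	return 0;
}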
asmlinkage __visible void do_softirq(void)
{
__u32 pending;
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
pending = local_softirq_pending();
if (pending && !ksoftirqd_running())
do_softirq_own_stack();
local_irq_restore(flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 36 | 75.00% | 1 | 20.00% |
Linus Torvalds | 6 | 12.50% | 1 | 20.00% |
Eric Dumazet | 4 | 8.33% | 1 | 20.00% |
Frédéric Weisbecker | 1 | 2.08% | 1 | 20.00% |
Andi Kleen | 1 | 2.08% | 1 | 20.00% |
Total | 48 | 100.00% | 5 | 100.00% |
/*
* Enter an interrupt context.
*/
void irq_enter(void)
{
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
local_bh_disable();
tick_irq_enter();
_local_bh_enable();
}
__irq_enter();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Venkatesh Pallipadi | 15 | 41.67% | 2 | 22.22% |
Ingo Molnar | 8 | 22.22% | 1 | 11.11% |
Thomas Gleixner | 6 | 16.67% | 2 | 22.22% |
Frédéric Weisbecker | 4 | 11.11% | 3 | 33.33% |
Paul E. McKenney | 3 | 8.33% | 1 | 11.11% |
Total | 36 | 100.00% | 9 | 100.00% |
static inline void invoke_softirq(void)
{
if (ksoftirqd_running())
return;
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
* We can safely execute softirq on the current stack if
* it is the irq stack, because it should be near empty
* at this stage.
*/
__do_softirq();
#else
/*
* Otherwise, irq_exit() is called on the task stack, which can
* already be quite deep. So run the softirq on its own stack
* to prevent any overrun.
*/
do_softirq_own_stack();
#endif
} else {
wakeup_softirqd();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 19 | 44.19% | 1 | 16.67% |
Frédéric Weisbecker | 17 | 39.53% | 3 | 50.00% |
Eric Dumazet | 6 | 13.95% | 1 | 16.67% |
Christoph Hellwig | 1 | 2.33% | 1 | 16.67% |
Total | 43 | 100.00% | 6 | 100.00% |
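A note on the force_irqthreads branch above (an aside, not from this file): that flag is set when the kernel is booted with the threadirqs command-line parameter, which forces interrupt handlers into threads and therefore makes this path defer softirqs to ksoftirqd instead of running them inline. A hypothetical boot line:
linux /boot/vmlinuz-4.15 root=/dev/sda1 ro threadirqs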
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
int cpu = smp_processor_id();
/* Make sure that timer wheel updates are propagated */
if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
if (!in_interrupt())
tick_nohz_irq_exit();
}
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 50 | 100.00% | 1 | 100.00% |
Total | 50 | 100.00% | 1 | 100.00% |
/*
* Exit an interrupt context. Process softirqs if needed and possible:
*/
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
#else
lockdep_assert_irqs_disabled();
#endif
account_irq_exit_time(current);
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
tick_irq_exit();
rcu_irq_exit();
trace_hardirq_exit(); /* must be last! */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 20 | 38.46% | 1 | 7.14% |
Thomas Gleixner | 11 | 21.15% | 2 | 14.29% |
Frédéric Weisbecker | 10 | 19.23% | 7 | 50.00% |
Peter Zijlstra | 5 | 9.62% | 2 | 14.29% |
Martin Schwidefsky | 4 | 7.69% | 1 | 7.14% |
Ingo Molnar | 2 | 3.85% | 1 | 7.14% |
Total | 52 | 100.00% | 14 | 100.00% |
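A hedged sketch (not from this file) of where the two functions above sit: an architecture's interrupt entry path typically brackets the generic handler with irq_enter()/irq_exit(). The function name my_arch_do_IRQ is hypothetical; generic_handle_irq() is the real generic dispatch entry point.
#include <linux/hardirq.h>
#include <linux/irqdesc.h>

static void my_arch_do_IRQ(unsigned int irq)
{
	irq_enter();			/* account entry, tell RCU/NO_HZ we are in an irq */
	generic_handle_irq(irq);	/* run the registered handler(s) for this line */
	irq_exit();			/* account exit, then invoke_softirq() if work is pending */
}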
/*
* This function must run with irqs disabled!
*/
inline void raise_softirq_irqoff(unsigned int nr)
{
__raise_softirq_irqoff(nr);
/*
* If we're in an interrupt or softirq, we're done
* (this also catches softirq-disabled code). We will
* actually run the softirq once we return from
* the irq or softirq.
*
* Otherwise we wake up ksoftirqd to make sure we
* schedule the softirq soon.
*/
if (!in_interrupt())
wakeup_softirqd();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 19 | 76.00% | 1 | 20.00% |
Rusty Russell | 3 | 12.00% | 2 | 40.00% |
Ingo Molnar | 3 | 12.00% | 2 | 40.00% |
Total | 25 | 100.00% | 5 | 100.00% |
void raise_softirq(unsigned int nr)
{
unsigned long flags;
local_irq_save(flags);
raise_softirq_irqoff(nr);
local_irq_restore(flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 25 | 89.29% | 3 | 50.00% |
Robert Love | 1 | 3.57% | 1 | 16.67% |
Rusty Russell | 1 | 3.57% | 1 | 16.67% |
Linus Torvalds (pre-git) | 1 | 3.57% | 1 | 16.67% |
Total | 28 | 100.00% | 6 | 100.00% |
void __raise_softirq_irqoff(unsigned int nr)
{
trace_softirq_raise(nr);
or_softirq_pending(1UL << nr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 28 | 100.00% | 3 | 100.00% |
Total | 28 | 100.00% | 3 | 100.00% |
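A hedged usage sketch of open_softirq()/raise_softirq() (not part of this file): a subsystem registers its handler once at boot and raises the vector from its hard interrupt handler. MY_SOFTIRQ is a hypothetical vector; a real one would have to be added to the fixed softirq enum in <linux/interrupt.h>, which is why new users are normally steered toward tasklets instead.
#include <linux/interrupt.h>
#include <linux/init.h>

/* MY_SOFTIRQ is hypothetical: a real vector must be an entry of the
 * softirq enum in <linux/interrupt.h>. */
static void my_softirq_handler(struct softirq_action *h)
{
	/* runs in softirq context on the CPU that raised it, irqs enabled */
}

static int __init my_subsys_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_handler);	/* fills softirq_vec[MY_SOFTIRQ] */
	return 0;
}

static irqreturn_t my_hardirq(int irq, void *dev_id)
{
	raise_softirq(MY_SOFTIRQ);	/* or raise_softirq_irqoff() if irqs are known off */
	return IRQ_HANDLED;
}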
/*
* Tasklets
*/
struct tasklet_head {
struct tasklet_struct *head;
struct tasklet_struct **tail;
};
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
local_irq_save(flags);
t->next = NULL;
*__this_cpu_read(tasklet_vec.tail) = t;
__this_cpu_write(tasklet_vec.tail, &(t->next));
raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 39 | 66.10% | 2 | 33.33% |
Olof Johansson | 12 | 20.34% | 1 | 16.67% |
Christoph Lameter | 5 | 8.47% | 1 | 16.67% |
Rusty Russell | 3 | 5.08% | 2 | 33.33% |
Total | 59 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(__tasklet_schedule);
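A hedged user-space model (not part of this file) of the tasklet_head layout used above: keeping a pointer to the last ->next slot makes appending O(1) even on an empty list, because tail then simply points at head itself.
#include <assert.h>
#include <stddef.h>

struct node  { struct node *next; };
struct queue { struct node *head; struct node **tail; };

static void queue_init(struct queue *q)
{
	q->head = NULL;
	q->tail = &q->head;		/* empty: tail points at head itself */
}

static void queue_append(struct queue *q, struct node *n)
{
	n->next = NULL;
	*q->tail = n;			/* link after the current last element (or at head) */
	q->tail = &n->next;		/* remember the new last ->next slot */
}

int main(void)
{
	struct queue q;
	struct node a, b;

	queue_init(&q);
	queue_append(&q, &a);
	queue_append(&q, &b);
	assert(q.head == &a && a.next == &b && b.next == NULL);
	return 0;
}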
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
unsigned long flags;
local_irq_save(flags);
t->next = NULL;
*__this_cpu_read(tasklet_hi_vec.tail) = t;
__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 39 | 66.10% | 2 | 33.33% |
Olof Johansson | 12 | 20.34% | 1 | 16.67% |
Christoph Lameter | 5 | 8.47% | 1 | 16.67% |
Rusty Russell | 3 | 5.08% | 2 | 33.33% |
Total | 59 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(__tasklet_hi_schedule);
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
struct tasklet_struct *list;
local_irq_disable();
list = __this_cpu_read(tasklet_vec.head);
__this_cpu_write(tasklet_vec.head, NULL);
__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
local_irq_enable();
while (list) {
struct tasklet_struct *t = list;
list = list->next;
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
if (!test_and_clear_bit(TASKLET_STATE_SCHED,
&t->state))
BUG();
t->func(t->data);
tasklet_unlock(t);
continue;
}
tasklet_unlock(t);
}
local_irq_disable();
t->next = NULL;
*__this_cpu_read(tasklet_vec.tail) = t;
__this_cpu_write(tasklet_vec.tail, &(t->next));
__raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_enable();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 89 | 51.15% | 7 | 41.18% |
Linus Torvalds | 40 | 22.99% | 4 | 23.53% |
Olof Johansson | 23 | 13.22% | 1 | 5.88% |
Christoph Lameter | 16 | 9.20% | 2 | 11.76% |
Rusty Russell | 5 | 2.87% | 2 | 11.76% |
Emese Revfy | 1 | 0.57% | 1 | 5.88% |
Total | 174 | 100.00% | 17 | 100.00% |
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
struct tasklet_struct *list;
local_irq_disable();
list = __this_cpu_read(tasklet_hi_vec.head);
__this_cpu_write(tasklet_hi_vec.head, NULL);
__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
local_irq_enable();
while (list) {
struct tasklet_struct *t = list;
list = list->next;
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
if (!test_and_clear_bit(TASKLET_STATE_SCHED,
&t->state))
BUG();
t->func(t->data);
tasklet_unlock(t);
continue;
}
tasklet_unlock(t);
}
local_irq_disable();
t->next = NULL;
*__this_cpu_read(tasklet_hi_vec.tail) = t;
__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
__raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 89 | 51.15% | 1 | 9.09% |
Linus Torvalds | 40 | 22.99% | 4 | 36.36% |
Olof Johansson | 23 | 13.22% | 1 | 9.09% |
Christoph Lameter | 16 | 9.20% | 2 | 18.18% |
Rusty Russell | 5 | 2.87% | 2 | 18.18% |
Emese Revfy | 1 | 0.57% | 1 | 9.09% |
Total | 174 | 100.00% | 11 | 100.00% |
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
t->next = NULL;
t->state = 0;
atomic_set(&t->count, 0);
t->func = func;
t->data = data;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 44 | 75.86% | 1 | 50.00% |
Linus Torvalds | 14 | 24.14% | 1 | 50.00% |
Total | 58 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
if (in_interrupt())
pr_notice("Attempt to kill tasklet from interrupt\n");
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
yield();
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 64 | 94.12% | 3 | 50.00% |
H Hartley Sweeten | 2 | 2.94% | 1 | 16.67% |
Joe Perches | 1 | 1.47% | 1 | 16.67% |
Linus Torvalds | 1 | 1.47% | 1 | 16.67% |
Total | 68 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(tasklet_kill);
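A hedged driver-style sketch of the tasklet lifecycle (not part of this file): initialize in probe, schedule from the hard interrupt handler, kill on teardown. struct my_dev and the my_* functions are hypothetical.
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct rx_tasklet;
	/* ... device state ... */
};

static void my_rx_work(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;
	/* bottom-half work runs here, in softirq context, using dev->... */
}

static irqreturn_t my_irq(int irq, void *arg)
{
	struct my_dev *dev = arg;

	tasklet_schedule(&dev->rx_tasklet);	/* ends up in __tasklet_schedule() above */
	return IRQ_HANDLED;
}

static int my_probe(struct my_dev *dev)
{
	tasklet_init(&dev->rx_tasklet, my_rx_work, (unsigned long)dev);
	return 0;
}

static void my_remove(struct my_dev *dev)
{
	tasklet_kill(&dev->rx_tasklet);		/* wait for a running instance to finish */
}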
/*
* tasklet_hrtimer
*/
/*
* The trampoline is called when the hrtimer expires. It schedules a tasklet
* to run __tasklet_hrtimer_trampoline() which in turn will call the intended
* hrtimer callback, but from softirq context.
*/
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
struct tasklet_hrtimer *ttimer =
container_of(timer, struct tasklet_hrtimer, timer);
tasklet_hi_schedule(&ttimer->tasklet);
return HRTIMER_NORESTART;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
/*
* Helper function which calls the hrtimer callback from
* tasklet/softirq context
*/
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
struct tasklet_hrtimer *ttimer = (void *)data;
enum hrtimer_restart restart;
restart = ttimer->function(&ttimer->timer);
if (restart != HRTIMER_NORESTART)
hrtimer_restart(&ttimer->timer);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
/**
* tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
* @ttimer: tasklet_hrtimer which is initialized
* @function: hrtimer callback function which gets called from softirq context
* @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
* @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
*/
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t which_clock, enum hrtimer_mode mode)
{
hrtimer_init(&ttimer->timer, which_clock, mode);
ttimer->timer.function = __hrtimer_tasklet_trampoline;
tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
(unsigned long)ttimer);
ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
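A hedged usage sketch for the combo above (not part of this file): the callback fires in HI-tasklet (softirq) context once the hrtimer expires. tasklet_hrtimer_start() is the matching helper from <linux/interrupt.h>; my_th, my_timer_fn and my_start are hypothetical names.
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_th;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* runs from the HI tasklet, not from hard interrupt context */
	return HRTIMER_NORESTART;
}

static void my_start(void)
{
	tasklet_hrtimer_init(&my_th, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&my_th, ms_to_ktime(10), HRTIMER_MODE_REL);
}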
void __init softirq_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
per_cpu(tasklet_vec, cpu).tail =
&per_cpu(tasklet_vec, cpu).head;
per_cpu(tasklet_hi_vec, cpu).tail =
&per_cpu(tasklet_hi_vec, cpu).head;
}
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Olof Johansson | 47 | 68.12% | 1 | 25.00% |
Linus Torvalds (pre-git) | 19 | 27.54% | 2 | 50.00% |
Linus Torvalds | 3 | 4.35% | 1 | 25.00% |
Total | 69 | 100.00% | 4 | 100.00% |
static int ksoftirqd_should_run(unsigned int cpu)
{
return local_softirq_pending();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 6 | 42.86% | 1 | 25.00% |
Linus Torvalds | 5 | 35.71% | 1 | 25.00% |
David S. Miller | 2 | 14.29% | 1 | 25.00% |
Zwane Mwaikambo | 1 | 7.14% | 1 | 25.00% |
Total | 14 | 100.00% | 4 | 100.00% |
static void run_ksoftirqd(unsigned int cpu)
{
local_irq_disable();
if (local_softirq_pending()) {
/*
* We can safely run softirq on inline stack, as we are not deep
* in the task stack here.
*/
__do_softirq();
local_irq_enable();
cond_resched_rcu_qs();
return;
}
local_irq_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 24 | 70.59% | 2 | 28.57% |
Rusty Russell | 4 | 11.76% | 1 | 14.29% |
Linus Torvalds | 2 | 5.88% | 1 | 14.29% |
Calvin Owens | 2 | 5.88% | 1 | 14.29% |
Paul E. McKenney | 1 | 2.94% | 1 | 14.29% |
Frédéric Weisbecker | 1 | 2.94% | 1 | 14.29% |
Total | 34 | 100.00% | 7 | 100.00% |
#ifdef CONFIG_HOTPLUG_CPU
/*
* tasklet_kill_immediate is called to remove a tasklet which can already be
* scheduled for execution on @cpu.
*
* Unlike tasklet_kill, this function removes the tasklet
* _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
*
* When this function is called, @cpu must be in the CPU_DEAD state.
*/
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
struct tasklet_struct **i;
BUG_ON(cpu_online(cpu));
BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
if (!test_bit(TASKLET_STATE_SCHED, &t->state))
return;
/* CPU is dead, so no lock needed. */
for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
if (*i == t) {
*i = t->next;
/* If this was the tail element, move the tail ptr */
if (*i == NULL)
per_cpu(tasklet_vec, cpu).tail = i;
return;
}
}
BUG();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 104 | 83.87% | 1 | 50.00% |
Olof Johansson | 20 | 16.13% | 1 | 50.00% |
Total | 124 | 100.00% | 2 | 100.00% |
static int takeover_tasklets(unsigned int cpu)
{
/* CPU is dead, so no lock needed. */
local_irq_disable();
/* Find end, append list for that CPU. */
if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
per_cpu(tasklet_vec, cpu).head = NULL;
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
}
raise_softirq_irqoff(TASKLET_SOFTIRQ);
if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
per_cpu(tasklet_hi_vec, cpu).head = NULL;
per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
}
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Olof Johansson | 72 | 35.47% | 1 | 20.00% |
Rusty Russell | 71 | 34.98% | 1 | 20.00% |
Christian Bornträger | 46 | 22.66% | 1 | 20.00% |
Christoph Lameter | 10 | 4.93% | 1 | 20.00% |
Sebastian Andrzej Siewior | 4 | 1.97% | 1 | 20.00% |
Total | 203 | 100.00% | 5 | 100.00% |
#else
#define takeover_tasklets NULL
#endif /* CONFIG_HOTPLUG_CPU */
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
};
static __init int spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rusty Russell | 10 | 31.25% | 1 | 14.29% |
Sebastian Andrzej Siewior | 8 | 25.00% | 1 | 14.29% |
Linus Torvalds | 4 | 12.50% | 1 | 14.29% |
Andrew Morton | 4 | 12.50% | 1 | 14.29% |
Thomas Gleixner | 3 | 9.38% | 1 | 14.29% |
Akinobu Mita | 2 | 6.25% | 1 | 14.29% |
Eduard - Gabriel Munteanu | 1 | 3.12% | 1 | 14.29% |
Total | 32 | 100.00% | 7 | 100.00% |
early_initcall(spawn_ksoftirqd);
/*
* [ These __weak aliases are kept in a separate compilation unit, so that
* GCC does not inline them incorrectly. ]
*/
int __init __weak early_irq_init(void)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
int __init __weak arch_probe_nr_irqs(void)
{
return NR_IRQS_LEGACY;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 11 | 91.67% | 1 | 50.00% |
Thomas Gleixner | 1 | 8.33% | 1 | 50.00% |
Total | 12 | 100.00% | 2 | 100.00% |
int __init __weak arch_early_irq_init(void)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
return from;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 391 | 15.54% | 13 | 9.35% |
Linus Torvalds | 286 | 11.37% | 8 | 5.76% |
Peter Zijlstra | 270 | 10.73% | 7 | 5.04% |
Rusty Russell | 259 | 10.29% | 8 | 5.76% |
Olof Johansson | 216 | 8.59% | 1 | 0.72% |
Thomas Gleixner | 163 | 6.48% | 12 | 8.63% |
Ingo Molnar | 143 | 5.68% | 6 | 4.32% |
Frédéric Weisbecker | 141 | 5.60% | 17 | 12.23% |
Andrew Morton | 73 | 2.90% | 8 | 5.76% |
Eric Dumazet | 65 | 2.58% | 3 | 2.16% |
Christoph Lameter | 53 | 2.11% | 2 | 1.44% |
Venkatesh Pallipadi | 50 | 1.99% | 3 | 2.16% |
Christian Bornträger | 46 | 1.83% | 1 | 0.72% |
Steven Rostedt | 45 | 1.79% | 4 | 2.88% |
Joe Perches | 39 | 1.55% | 3 | 2.16% |
Yinghai Lu | 36 | 1.43% | 2 | 1.44% |
Jason Baron | 29 | 1.15% | 2 | 1.44% |
Christoph Hellwig | 23 | 0.91% | 2 | 1.44% |
Sebastian Andrzej Siewior | 22 | 0.87% | 2 | 1.44% |
Mel Gorman | 22 | 0.87% | 1 | 0.72% |
Arnaldo Carvalho de Melo | 20 | 0.79% | 1 | 0.72% |
Tim Chen | 19 | 0.76% | 1 | 0.72% |
Heiko Carstens | 15 | 0.60% | 2 | 1.44% |
Ben Greear | 13 | 0.52% | 1 | 0.72% |
David S. Miller | 9 | 0.36% | 2 | 1.44% |
Paul Mackerras | 8 | 0.32% | 1 | 0.72% |
Johannes Berg | 8 | 0.32% | 1 | 0.72% |
Eduard - Gabriel Munteanu | 6 | 0.24% | 1 | 0.72% |
Andi Kleen | 5 | 0.20% | 2 | 1.44% |
Martin Schwidefsky | 4 | 0.16% | 1 | 0.72% |
Dipankar Sarma | 4 | 0.16% | 1 | 0.72% |
Keika Kobayashi | 4 | 0.16% | 1 | 0.72% |
Paul E. McKenney | 4 | 0.16% | 2 | 1.44% |
Rafael J. Wysocki | 3 | 0.12% | 1 | 0.72% |
Emese Revfy | 2 | 0.08% | 1 | 0.72% |
Akinobu Mita | 2 | 0.08% | 1 | 0.72% |
Calvin Owens | 2 | 0.08% | 1 | 0.72% |
David Shaohua Li | 2 | 0.08% | 1 | 0.72% |
Davidlohr Bueso A | 2 | 0.08% | 1 | 0.72% |
H Hartley Sweeten | 2 | 0.08% | 1 | 0.72% |
Alexander Potapenko | 1 | 0.04% | 1 | 0.72% |
Dave Jones | 1 | 0.04% | 1 | 0.72% |
Neil Brown | 1 | 0.04% | 1 | 0.72% |
Robert Love | 1 | 0.04% | 1 | 0.72% |
Zwane Mwaikambo | 1 | 0.04% | 1 | 0.72% |
Li Zefan | 1 | 0.04% | 1 | 0.72% |
Sagi Grimberg | 1 | 0.04% | 1 | 0.72% |
Paul Gortmaker | 1 | 0.04% | 1 | 0.72% |
Lucas De Marchi | 1 | 0.04% | 1 | 0.72% |
Alexey Dobriyan | 1 | 0.04% | 1 | 0.72% |
Total | 2516 | 100.00% | 139 | 100.00% |