Release 4.11 kernel/sched/idle_task.c
#include "sched.h"
/*
* idle-task scheduling class.
*
* (NOTE: these are not related to SCHED_IDLE tasks which are
* handled in sched/fair.c)
*/
#ifdef CONFIG_SMP
/*
 * Task placement for the idle task: the per-CPU idle thread is pinned
 * to its CPU, so always report the task's current CPU.
 */
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
return task_cpu(p); /* IDLE tasks are never migrated */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 20 | 74.07% | 1 | 20.00% |
Peter Zijlstra | 7 | 25.93% | 4 | 80.00% |
Total | 27 | 100.00% | 5 | 100.00% |
#endif /* CONFIG_SMP */
/*
* Idle tasks are unconditionally rescheduled:
*/
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
/* Any runnable task beats idle: always mark the current task for resched. */
resched_curr(rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 20 | 83.33% | 1 | 25.00% |
Peter Zijlstra | 3 | 12.50% | 2 | 50.00% |
Kirill V Tkhai | 1 | 4.17% | 1 | 25.00% |
Total | 24 | 100.00% | 4 | 100.00% |
/*
 * Nothing else is runnable: hand back the per-CPU idle thread.
 * @prev must be put back via put_prev_task() before we return the
 * idle task -- callers rely on prev being fully "put" at that point.
 */
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
put_prev_task(rq, prev);
update_idle_core(rq); /* NOTE(review): presumably SMT-core idle tracking -- confirm against sched.h */
schedstat_inc(rq->sched_goidle); /* account one transition into idle */
return rq->idle;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 24 | 51.06% | 1 | 16.67% |
Peter Zijlstra | 19 | 40.43% | 3 | 50.00% |
Matt Fleming | 3 | 6.38% | 1 | 16.67% |
Josh Poimboeuf | 1 | 2.13% | 1 | 16.67% |
Total | 47 | 100.00% | 6 | 100.00% |
/*
* It is not legal to sleep in the idle task - print a warning
* message if some code attempts to do it:
*/
/*
 * Dequeueing the idle task is a bug (it would mean the idle thread
 * tried to sleep).  Complain loudly but keep the system running.
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
/*
 * Drop rq->lock around printk()/dump_stack() so the (potentially
 * slow, possibly rescheduling) console path runs without holding
 * the runqueue lock, then reacquire before returning to the caller,
 * which expects the lock still held.
 */
raw_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
raw_spin_lock_irq(&rq->lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 39 | 88.64% | 1 | 25.00% |
Peter Zijlstra | 3 | 6.82% | 2 | 50.00% |
Thomas Gleixner | 2 | 4.55% | 1 | 25.00% |
Total | 44 | 100.00% | 4 | 100.00% |
/* The idle task is being switched out: no accounting state to save. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
/* NOTE(review): looks like NO_HZ last-tick bookkeeping reset -- confirm in sched.h */
rq_last_tick_reset(rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 14 | 66.67% | 1 | 50.00% |
Peter Zijlstra | 7 | 33.33% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
/* No per-tick work for the idle task: it has no runtime accounting. */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 15 | 83.33% | 1 | 50.00% |
Peter Zijlstra | 3 | 16.67% | 1 | 50.00% |
Total | 18 | 100.00% | 2 | 100.00% |
/* Becoming the current task needs no setup for the idle class. */
static void set_curr_task_idle(struct rq *rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srivatsa Vaddagiri | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
/*
 * A task must never be switched into the idle scheduling class;
 * reaching here indicates scheduler-core breakage.
 */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
BUG();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 17 | 89.47% | 1 | 50.00% |
Peter Zijlstra | 2 | 10.53% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
/*
 * The idle task has no priority to change; a call here is a
 * scheduler-core bug.
 */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
BUG();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 20 | 90.91% | 1 | 50.00% |
Peter Zijlstra | 2 | 9.09% | 1 | 50.00% |
Total | 22 | 100.00% | 2 | 100.00% |
/* The idle task has no round-robin timeslice: report 0. */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Williams | 14 | 70.00% | 1 | 33.33% |
Thomas Gleixner | 5 | 25.00% | 1 | 33.33% |
H Hartley Sweeten | 1 | 5.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
/* No runtime statistics are maintained for the idle task. */
static void update_curr_idle(struct rq *rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 *
 * Lowest-priority class in the picking order (.next is NULL, so the
 * chain terminates here).  Callbacks that make no sense for idle are
 * either omitted or wired to the BUG()/warning stubs above.
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */
	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,
	.check_preempt_curr	= check_preempt_curr_idle,
	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif
	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,
	.get_rr_interval	= get_rr_interval_idle,
	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 150 | 42.86% | 2 | 6.06% |
Peter Zijlstra | 54 | 15.43% | 15 | 45.45% |
Steven Rostedt | 46 | 13.14% | 1 | 3.03% |
Gregory Haskins | 26 | 7.43% | 1 | 3.03% |
Peter Williams | 25 | 7.14% | 3 | 9.09% |
Thomas Gleixner | 22 | 6.29% | 3 | 9.09% |
Srivatsa Vaddagiri | 15 | 4.29% | 1 | 3.03% |
Li Zefan | 4 | 1.14% | 1 | 3.03% |
Matt Fleming | 3 | 0.86% | 1 | 3.03% |
Hiroshi Shimamoto | 1 | 0.29% | 1 | 3.03% |
Josh Poimboeuf | 1 | 0.29% | 1 | 3.03% |
Kirill V Tkhai | 1 | 0.29% | 1 | 3.03% |
Borislav Petkov | 1 | 0.29% | 1 | 3.03% |
H Hartley Sweeten | 1 | 0.29% | 1 | 3.03% |
Total | 350 | 100.00% | 33 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.