Release 4.10: kernel/sched/idle_task.c
#include "sched.h"
/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched/fair.c)
 */
#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| gregory haskins | 20 | 74.07% | 1 | 20.00% |
| peter zijlstra | 7 | 25.93% | 4 | 80.00% |
| Total | 27 | 100.00% | 5 | 100.00% |
#endif /* CONFIG_SMP */
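For wake-ups, select_task_rq() picks a target CPU; for the idle class that choice is the identity mapping, because every CPU owns exactly one idle task that must never run anywhere else. A minimal userspace sketch of the idea (all names here are hypothetical, not kernel APIs):

```c
#include <stdio.h>

#define NR_CPUS 4

/* Toy per-CPU idle tasks (hypothetical; not kernel code). */
struct toy_task {
	int cpu;	/* the CPU this idle task belongs to */
};

/* Idle tasks never migrate: the target is always the task's own CPU. */
static int toy_select_rq_idle(const struct toy_task *p)
{
	return p->cpu;
}

int main(void)
{
	struct toy_task idle[NR_CPUS];

	for (int i = 0; i < NR_CPUS; i++) {
		idle[i].cpu = i;
		printf("idle[%d] wakes on CPU %d\n", i,
		       toy_select_rq_idle(&idle[i]));
	}
	return 0;
}
```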
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| ingo molnar | 20 | 83.33% | 1 | 25.00% |
| peter zijlstra | 3 | 12.50% | 2 | 50.00% |
| kirill v tkhai | 1 | 4.17% | 1 | 25.00% |
| Total | 24 | 100.00% | 4 | 100.00% |
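The hook can be this trivial because the idle task never has useful work: any task that becomes runnable on the CPU should win. A toy model of the same policy (hypothetical names, not kernel code):

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model: a per-CPU runqueue with a need_resched flag. */
struct toy_rq {
	const char *curr;	/* name of the currently running task */
	bool need_resched;	/* set when curr should be preempted */
};

/* Idle never has anything useful to do, so any waking task preempts it. */
static void toy_check_preempt_idle(struct toy_rq *rq)
{
	rq->need_resched = true;
}

int main(void)
{
	struct toy_rq rq = { .curr = "swapper/0", .need_resched = false };

	/* A task wakes up while the idle task is running ... */
	toy_check_preempt_idle(&rq);

	/* ... so the next scheduling point will switch away from idle. */
	printf("need_resched = %d\n", rq.need_resched);
	return 0;
}
```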
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
	put_prev_task(rq, prev);
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
	return rq->idle;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| ingo molnar | 24 | 52.17% | 1 | 20.00% |
| peter zijlstra | 21 | 45.65% | 3 | 60.00% |
| josh poimboeuf | 1 | 2.17% | 1 | 20.00% |
| Total | 46 | 100.00% | 5 | 100.00% |
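pick_next_task_idle() can always succeed, which is what lets this class sit at the bottom of the hierarchy: the core scheduler tries classes from highest to lowest priority and stops at the first one that yields a task, so the idle class acts as the fallback that never fails. A simplified sketch of that pick loop (hypothetical names and a flat function table, not the kernel's actual iteration):

```c
#include <stddef.h>
#include <stdio.h>

/* Toy model of class-based picking: classes are tried from highest to
 * lowest priority, and the idle class sits last as a pick that cannot
 * fail. All names are hypothetical. */
struct toy_task { const char *name; };

typedef struct toy_task *(*pick_fn)(void);

static struct toy_task idle_task = { "swapper/0" };

static struct toy_task *pick_rt(void)   { return NULL; }	/* no RT task runnable */
static struct toy_task *pick_fair(void) { return NULL; }	/* no CFS task runnable */
static struct toy_task *pick_idle(void) { return &idle_task; }	/* always succeeds */

int main(void)
{
	pick_fn classes[] = { pick_rt, pick_fair, pick_idle };
	struct toy_task *next = NULL;

	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		next = classes[i]();
		if (next)
			break;	/* first class with a runnable task wins */
	}

	printf("next task: %s\n", next->name);
	return 0;
}
```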
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| ingo molnar | 39 | 88.64% | 1 | 25.00% |
| peter zijlstra | 3 | 6.82% | 2 | 50.00% |
| thomas gleixner | 2 | 4.55% | 1 | 25.00% |
| Total | 44 | 100.00% | 4 | 100.00% |
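Note the unlock/relock around the diagnostics: printk() and dump_stack() are slow and may themselves need to wake tasks, so holding the runqueue lock across them invites deadlock. The same drop-the-hot-lock-around-slow-output pattern, sketched in userspace with pthreads (hypothetical names, not kernel code):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch: drop a hot lock around a slow diagnostic path, then retake it
 * so the caller still sees the lock held on return. */
static void report_bad_state(void)
{
	pthread_mutex_unlock(&lock);	/* don't hold the lock across slow I/O */
	fprintf(stderr, "bad: scheduling from the idle thread!\n");
	pthread_mutex_lock(&lock);	/* caller still expects the lock held */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	report_bad_state();
	pthread_mutex_unlock(&lock);
	return 0;
}
```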
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
	rq_last_tick_reset(rq);
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| ingo molnar | 14 | 66.67% | 1 | 50.00% |
| peter zijlstra | 7 | 33.33% | 1 | 50.00% |
| Total | 21 | 100.00% | 2 | 100.00% |
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| ingo molnar | 15 | 83.33% | 1 | 50.00% |
| peter zijlstra | 3 | 16.67% | 1 | 50.00% |
| Total | 18 | 100.00% | 2 | 100.00% |
static void set_curr_task_idle(struct rq *rq)
{
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| srivatsa vaddagiri | 10 | 100.00% | 1 | 100.00% |
| Total | 10 | 100.00% | 1 | 100.00% |
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| steven rostedt | 17 | 89.47% | 1 | 50.00% |
| peter zijlstra | 2 | 10.53% | 1 | 50.00% |
| Total | 19 | 100.00% | 2 | 100.00% |
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| steven rostedt | 20 | 90.91% | 1 | 50.00% |
| peter zijlstra | 2 | 9.09% | 1 | 50.00% |
| Total | 22 | 100.00% | 2 | 100.00% |
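Both switched_to_idle() and prio_changed_idle() call BUG() because no task may ever be switched into the idle class or have its priority changed while in it; reaching these hooks means scheduler state is corrupted, so failing loudly beats limping on. The userspace analogue is an unreachable assertion (hypothetical example, not kernel code):

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for a state transition that must never happen. */
static void switched_to_idle_like(void)
{
	/* Equivalent of BUG(): die loudly rather than continue running
	 * with corrupted state. */
	assert(!"task switched to the idle class");
}

int main(void)
{
	puts("calling the forbidden transition...");
	switched_to_idle_like();	/* aborts here */
	return 0;
}
```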
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| peter williams | 14 | 70.00% | 1 | 33.33% |
| thomas gleixner | 5 | 25.00% | 1 | 33.33% |
| h hartley sweeten | 1 | 5.00% | 1 | 33.33% |
| Total | 20 | 100.00% | 3 | 100.00% |
static void update_curr_idle(struct rq *rq)
{
}
Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| thomas gleixner | 10 | 100.00% | 1 | 100.00% |
| Total | 10 | 100.00% | 1 | 100.00% |
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,
	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
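Because the table uses designated initializers, every hook that is not listed (such as .enqueue_task and .yield_task) is implicitly NULL, which is how the class omits operations it does not support. A cut-down illustration of that C idiom (hypothetical struct, not the kernel's sched_class):

```c
#include <stdio.h>

/* Hypothetical, cut-down operations table to illustrate the pattern. */
struct toy_ops {
	void (*enqueue)(void);
	void (*dequeue)(void);
};

static void toy_dequeue(void) { puts("dequeue called"); }

/* Designated initializer: .enqueue is implicitly NULL. */
static const struct toy_ops idle_ops = {
	.dequeue = toy_dequeue,
};

int main(void)
{
	if (idle_ops.enqueue)
		idle_ops.enqueue();
	else
		puts("no enqueue hook: idle task is never enqueued");

	idle_ops.dequeue();
	return 0;
}
```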
Overall Contributors

| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| ingo molnar | 150 | 42.98% | 2 | 6.25% |
| peter zijlstra | 56 | 16.05% | 15 | 46.88% |
| steven rostedt | 46 | 13.18% | 1 | 3.12% |
| gregory haskins | 26 | 7.45% | 1 | 3.12% |
| peter williams | 25 | 7.16% | 3 | 9.38% |
| thomas gleixner | 22 | 6.30% | 3 | 9.38% |
| srivatsa vaddagiri | 15 | 4.30% | 1 | 3.12% |
| li zefan | 4 | 1.15% | 1 | 3.12% |
| josh poimboeuf | 1 | 0.29% | 1 | 3.12% |
| hiroshi shimamoto | 1 | 0.29% | 1 | 3.12% |
| borislav petkov | 1 | 0.29% | 1 | 3.12% |
| h hartley sweeten | 1 | 0.29% | 1 | 3.12% |
| kirill v tkhai | 1 | 0.29% | 1 | 3.12% |
| Total | 349 | 100.00% | 32 | 100.00% |