Release 4.15: kernel/sched/rt.c
// SPDX-License-Identifier: GPL-2.0
/*
* Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
* policies)
*/
#include "sched.h"
#include <linux/slab.h>
#include <linux/irq_work.h>
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
struct rt_bandwidth def_rt_bandwidth;
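For context, sysctl_sched_rr_timeslice above re-expresses the jiffies-based RR_TIMESLICE default in milliseconds for the sysctl interface. A worked check, assuming RR_TIMESLICE is (100 * HZ / 1000) as defined in include/linux/sched/rt.h:
/* Illustrative arithmetic only, not part of rt.c. With HZ = 250:
 *   RR_TIMESLICE              = 100 * 250 / 1000 = 25 jiffies
 *   sysctl_sched_rr_timeslice = (1000 / 250) * 25 = 100 ms
 * so the default SCHED_RR slice reported via sysctl is 100 ms
 * (subject to integer rounding for HZ values that do not divide 1000).
 */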
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
int idle = 0;
int overrun;
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
overrun = hrtimer_forward_now(timer, rt_b->rt_period);
if (!overrun)
break;
raw_spin_unlock(&rt_b->rt_runtime_lock);
idle = do_sched_rt_period_timer(rt_b, overrun);
raw_spin_lock(&rt_b->rt_runtime_lock);
}
if (idle)
rt_b->rt_period_active = 0;
raw_spin_unlock(&rt_b->rt_runtime_lock);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 117 | 100.00% | 3 | 100.00% |
Total | 117 | 100.00% | 3 | 100.00% |
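The handler above is the standard shape for a periodic hrtimer: forward the expiry past "now", account for each missed period, and rearm only while there is work left. A minimal kernel-style sketch of the same pattern, using hypothetical names (my_periodic, my_periodic_fn) rather than anything from rt.c:
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_periodic {
	struct hrtimer timer;
	ktime_t period;
};

static enum hrtimer_restart my_periodic_fn(struct hrtimer *timer)
{
	struct my_periodic *p = container_of(timer, struct my_periodic, timer);
	/* Advance the expiry past now; the return value counts missed periods. */
	u64 missed = hrtimer_forward_now(timer, p->period);

	(void)missed;		/* a real handler would account for each missed period */
	return HRTIMER_RESTART;	/* keep firing every p->period */
}

static void my_periodic_start(struct my_periodic *p, u64 period_ns)
{
	p->period = ns_to_ktime(period_ns);
	hrtimer_init(&p->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	p->timer.function = my_periodic_fn;
	hrtimer_start(&p->timer, p->period, HRTIMER_MODE_REL);
}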
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 59 | 100.00% | 1 | 100.00% |
Total | 59 | 100.00% | 1 | 100.00% |
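For reference, def_rt_bandwidth declared earlier is initialized with the global RT period/runtime sysctls from core code; in this release that happens in sched_init() in kernel/sched/core.c, roughly:
/* in kernel/sched/core.c, sched_init() (paraphrased): */
init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());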
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
raw_spin_lock(&rt_b->rt_runtime_lock);
if (!rt_b->rt_period_active) {
rt_b->rt_period_active = 1;
/*
* SCHED_DEADLINE updates the bandwidth, as a runaway
* RT task with a DL task could hog a CPU. But DL does
* not reset the period. If a deadline task was running
* without an RT task running, it can cause RT tasks to
* throttle when they start up. Kick the timer right away
* to update the period.
*/
hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 74 | 93.67% | 2 | 66.67% |
Steven Rostedt | 5 | 6.33% | 1 | 33.33% |
Total | 79 | 100.00% | 3 | 100.00% |
void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
int i;
array = &rt_rq->active;
for (i = 0; i < MAX_RT_PRIO; i++) {
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP
rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->highest_prio.next = MAX_RT_PRIO;
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
/* We start in dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 134 | 93.06% | 1 | 33.33% |
Kirill V Tkhai | 7 | 4.86% | 1 | 33.33% |
Steven Rostedt | 3 | 2.08% | 1 | 33.33% |
Total | 144 | 100.00% | 3 | 100.00% |
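The extra bit set at MAX_RT_PRIO is a sentinel: a find-first-bit scan over the priority bitmap always terminates, and "first set bit == MAX_RT_PRIO" doubles as "no RT task queued". A standalone plain-C sketch of the idea (a byte array instead of the kernel bitmap API):
#include <stdio.h>

#define NPRIO 100                 /* stand-in for MAX_RT_PRIO */

/* one extra slot for the sentinel bit */
static unsigned char bitmap[NPRIO + 1];

static int find_first_set(void)
{
	int i = 0;

	/* never runs off the end: bitmap[NPRIO] is always 1 */
	while (!bitmap[i])
		i++;
	return i;
}

int main(void)
{
	bitmap[NPRIO] = 1;        /* the "delimiter for bitsearch" */

	printf("empty queue -> %d (== NPRIO means nothing queued)\n",
	       find_first_set());

	bitmap[42] = 1;           /* queue something at priority 42 */
	printf("after enqueue -> %d\n", find_first_set());
	return 0;
}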
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
return container_of(rt_se, struct task_struct, rt);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
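rt_task_of() relies on container_of() to get back from an embedded sched_rt_entity to its enclosing task_struct. A standalone illustration of the same offset arithmetic, with hypothetical struct names:
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entity { int prio; };
struct task  { long pid; struct entity rt; };

int main(void)
{
	struct task t = { .pid = 1234, .rt = { .prio = 10 } };
	struct entity *se = &t.rt;

	/* recover the enclosing struct task from the embedded member */
	struct task *owner = container_of(se, struct task, rt);

	printf("pid=%ld prio=%d\n", owner->pid, owner->rt.prio);
	return 0;
}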
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
return rt_se->rt_rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_se->rt_rq;
return rt_rq->rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill V Tkhai | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
void free_rt_sched_group(struct task_group *tg)
{
int i;
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
}
kfree(tg->rt_rq);
kfree(tg->rt_se);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 79 | 100.00% | 1 | 100.00% |
Total | 79 | 100.00% | 1 | 100.00% |
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
rt_rq->tg = tg;
tg->rt_rq[cpu] = rt_rq;
tg->rt_se[cpu] = rt_se;
if (!rt_se)
return;
if (!parent)
rt_se->rt_rq = &rq->rt;
else
rt_se->rt_rq = parent->my_q;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 131 | 100.00% | 2 | 100.00% |
Total | 131 | 100.00% | 2 | 100.00% |
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
struct sched_rt_entity *rt_se;
int i;
tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_rq)
goto err;
tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_se)
goto err;
init_rt_bandwidth(&tg->rt_bandwidth,
ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
GFP_KERNEL, cpu_to_node(i));
if (!rt_rq)
goto err;
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
goto err_free_rq;
init_rt_rq(rt_rq);
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
return 1;
err_free_rq:
kfree(rt_rq);
err:
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 184 | 90.64% | 2 | 66.67% |
Gregory Haskins | 19 | 9.36% | 1 | 33.33% |
Total | 203 | 100.00% | 3 | 100.00% |
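The allocation path above uses the usual goto unwinding: a failure inside the loop frees only that iteration's rt_rq (err_free_rq), while anything already stored in tg->rt_rq / tg->rt_se is left for free_rt_sched_group() to reclaim. A generic standalone sketch of the same error-unwind shape, with hypothetical names:
#include <stdlib.h>

struct pair { int *a; int *b; };

/* Returns 1 on success, 0 on failure; never leaks on the failure paths. */
static int pair_alloc(struct pair *p)
{
	p->a = malloc(sizeof(*p->a));
	if (!p->a)
		goto err;

	p->b = malloc(sizeof(*p->b));
	if (!p->b)
		goto err_free_a;

	return 1;

err_free_a:
	free(p->a);
err:
	return 0;
}

int main(void)
{
	struct pair p;

	if (!pair_alloc(&p))
		return 1;
	free(p.a);
	free(p.b);
	return 0;
}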
#else /* CONFIG_RT_GROUP_SCHED */
#define rt_entity_is_task(rt_se) (1)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
return container_of(rt_se, struct task_struct, rt);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct task_struct *p = rt_task_of(rt_se);
return task_rq(p);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 21 | 70.00% | 1 | 50.00% |
Kirill V Tkhai | 9 | 30.00% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
struct rq *rq = rq_of_rt_se(rt_se);
return &rq->rt;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 15 | 50.00% | 1 | 50.00% |
Kirill V Tkhai | 15 | 50.00% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
void free_rt_sched_group(struct task_group *tg) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 9 | 100.00% | 1 | 100.00% |
Total | 9 | 100.00% | 1 | 100.00% |
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
return rq->rt.highest_prio.curr > prev->prio;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
static inline int rt_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->rto_count);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 15 | 65.22% | 1 | 50.00% |
Gregory Haskins | 8 | 34.78% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
static inline void rt_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
/*
* Make sure the mask is visible before we set
* the overload count. That is checked to determine
* if we should look at the mask. It would be a shame
* if we looked at the mask, but the mask was not
* updated yet.
*
* Matched by the barrier in pull_rt_task().
*/
smp_wmb();
atomic_inc(&rq->rd->rto_count);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 27 | 57.45% | 1 | 20.00% |
Gregory Haskins | 17 | 36.17% | 2 | 40.00% |
Peter Zijlstra | 2 | 4.26% | 1 | 20.00% |
Rusty Russell | 1 | 2.13% | 1 | 20.00% |
Total | 47 | 100.00% | 5 | 100.00% |
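The smp_wmb() above orders the rto_mask update before the rto_count increment; the reader (pull_rt_task(), later in this file) checks the count first and issues the matching read barrier before scanning the mask. A userspace C11 analogue of that pairing, with hypothetical names and fences standing in for the kernel barriers:
#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong mask;	/* plays the role of rd->rto_mask  */
static atomic_int   count;	/* plays the role of rd->rto_count */

static void set_overload(int cpu)
{
	atomic_fetch_or_explicit(&mask, 1UL << cpu, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_fetch_add_explicit(&count, 1, memory_order_relaxed);
}

static bool cpu_overloaded(int cpu)
{
	if (!atomic_load_explicit(&count, memory_order_relaxed))
		return false;				/* cheap early out */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	/* having observed the count, the mask bit is guaranteed visible too */
	return atomic_load_explicit(&mask, memory_order_relaxed) & (1UL << cpu);
}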
static inline void rt_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
/* the order here really doesn't matter */
atomic_dec(&rq->rd->rto_count);
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 24 | 54.55% | 1 | 20.00% |
Gregory Haskins | 19 | 43.18% | 3 | 60.00% |
Rusty Russell | 1 | 2.27% | 1 | 20.00% |
Total | 44 | 100.00% | 5 | 100.00% |
static void update_rt_migration(struct rt_rq *rt_rq)
{
if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
}
} else if (rt_rq->overloaded) {
rt_clear_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 70 | 98.59% | 4 | 80.00% |
Peter Zijlstra | 1 | 1.41% | 1 | 20.00% |
Total | 71 | 100.00% | 5 | 100.00% |
static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total++;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory++;
update_rt_migration(rt_rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 47 | 67.14% | 3 | 50.00% |
Gregory Haskins | 21 | 30.00% | 2 | 33.33% |
Ingo Molnar | 2 | 2.86% | 1 | 16.67% |
Total | 70 | 100.00% | 6 | 100.00% |
static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total--;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory--;
update_rt_migration(rt_rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 37 | 52.86% | 2 | 50.00% |
Gregory Haskins | 31 | 44.29% | 1 | 25.00% |
Ingo Molnar | 2 | 2.86% | 1 | 25.00% |
Total | 70 | 100.00% | 4 | 100.00% |
static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);
static inline void queue_push_tasks(struct rq *rq)
{
if (!has_pushable_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 38 | 100.00% | 3 | 100.00% |
Total | 38 | 100.00% | 3 | 100.00% |
static inline void queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the highest prio pushable task */
if (p->prio < rq->rt.highest_prio.next)
rq->rt.highest_prio.next = p->prio;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 58 | 68.24% | 2 | 66.67% |
Steven Rostedt | 27 | 31.76% | 1 | 33.33% |
Total | 85 | 100.00% | 3 | 100.00% |
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the new highest prio pushable task */
if (has_pushable_tasks(rq)) {
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
rq->rt.highest_prio.next = p->prio;
} else
rq->rt.highest_prio.next = MAX_RT_PRIO;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 39 | 48.15% | 1 | 33.33% |
Gregory Haskins | 30 | 37.04% | 1 | 33.33% |
Ingo Molnar | 12 | 14.81% | 1 | 33.33% |
Total | 81 | 100.00% | 3 | 100.00% |
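The pushable_tasks plist keeps entries sorted by ascending ->prio (lower value means higher priority), so the first entry always yields the next-highest pushable priority, which is exactly what dequeue_pushable_task() reads back into highest_prio.next. A plain-C sketch of the same invariant, not the kernel plist API:
#include <stdio.h>

struct node { int prio; struct node *next; };

/* insert keeping the list sorted by ascending prio (lower = more urgent) */
static void insert_sorted(struct node **head, struct node *n)
{
	while (*head && (*head)->prio <= n->prio)
		head = &(*head)->next;
	n->next = *head;
	*head = n;
}

int main(void)
{
	struct node a = { .prio = 30 }, b = { .prio = 10 }, c = { .prio = 20 };
	struct node *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);

	/* like rq->rt.highest_prio.next = first entry's prio */
	printf("next pushable prio = %d\n", head->prio);	/* prints 10 */
	return 0;
}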
#else
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 15 | 93.75% | 2 | 66.67% |
Peter Zijlstra | 1 | 6.25% | 1 | 33.33% |
Total | 16 | 100.00% | 3 | 100.00% |
static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 15 | 93.75% | 2 | 66.67% |
Peter Zijlstra | 1 | 6.25% | 1 | 33.33% |
Total | 16 | 100.00% | 3 | 100.00% |
static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 11 | 68.75% | 1 | 33.33% |
Peter Zijlstra | 5 | 31.25% | 2 | 66.67% |
Total | 16 | 100.00% | 3 | 100.00% |
static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gregory Haskins | 9 | 56.25% | 1 | 33.33% |
Peter Zijlstra | 7 | 43.75% | 2 | 66.67% |
Total | 16 | 100.00% | 3 | 100.00% |
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static inline void pull_rt_task(struct rq *this_rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 11 | 100.00% | 2 | 100.00% |
Total | 11 | 100.00% | 2 | 100.00% |
static inline void queue_push_tasks(struct rq *rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 11 | 100.00% | 2 | 100.00% |
Total | 11 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_SMP */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->on_rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 17 | 100.00% | 2 | 100.00% |
Total | 17 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_RT_GROUP_SCHED
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
if (!rt_rq->tg)
return RUNTIME_INF;
return rt_rq->rt_runtime;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 27 | 100.00% | 3 | 100.00% |
Total | 27 | 100.00% | 3 | 100.00% |
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 24 | 100.00% | 3 | 100.00% |
Total | 24 | 100.00% | 3 | 100.00% |
typedef struct task_group *rt_rq_iter_t;
static inline struct task_group *next_task_group(struct task_group *tg)
{
do {
tg = list_entry_rcu(tg->list.next,
typeof(struct task_group), list);
} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
if (&tg->list == &task_groups)
tg = NULL;
return tg;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yong Zhang | 69 | 100.00% | 1 | 100.00% |
Total | 69 | 100.00% | 1 | 100.00% |
#define for_each_rt_rq(rt_rq, iter, rq) \
for (iter = container_of(&task_groups, typeof(*iter), list); \
(iter = next_task_group(iter)) && \
(rt_rq = iter->rt_rq[cpu_of(rq)]);)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = rt_se->parent)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->my_q;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
struct rq *rq = rq_of_rt_rq(rt_rq);
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq);
rt_se = rt_rq->tg->rt_se[cpu];
if (rt_rq->rt_nr_running) {
if (!rt_se)
enqueue_top_rt_rq(rt_rq);
else if (!on_rt_rq(rt_se))
enqueue_rt_entity(rt_se, 0);
if (rt_rq->highest_prio.curr < curr->prio)
resched_curr(rq);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 47 | 43.52% | 4 | 36.36% |
Kirill V Tkhai | 23 | 21.30% | 2 | 18.18% |
Dario Faggioli | 19 | 17.59% | 1 | 9.09% |
Yong Zhang | 8 | 7.41% | 1 | 9.09% |
Balbir Singh | 8 | 7.41% | 1 | 9.09% |
Gregory Haskins | 2 | 1.85% | 1 | 9.09% |
Thomas Gleixner | 1 | 0.93% | 1 | 9.09% |
Total | 108 | 100.00% | 11 | 100.00% |
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq_of_rt_rq(rt_rq));
rt_se = rt_rq->tg->rt_se[cpu];
if (!rt_se)
dequeue_top_rt_rq(rt_rq);
else if (on_rt_rq(rt_se))
dequeue_rt_entity(rt_se, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 33 | 52.38% | 3 | 50.00% |
Balbir Singh | 12 | 19.05% | 1 | 16.67% |
Kirill V Tkhai | 10 | 15.87% | 1 | 16.67% |
Yong Zhang | 8 | 12.70% | 1 | 16.67% |
Total | 63 | 100.00% | 6 | 100.00% |
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill V Tkhai | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = group_rt_rq(rt_se);
struct task_struct *p;
if (rt_rq)
return !!rt_rq->rt_nr_boosted;
p = rt_task_of(rt_se);
return p->prio != p->normal_prio;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
return this_rq()->rd->span;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 15 | 75.00% | 1 | 33.33% |
Rusty Russell | 4 | 20.00% | 1 | 33.33% |
Nathan Zimmer | 1 | 5.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 10 | 66.67% | 1 | 50.00% |
Rusty Russell | 5 | 33.33% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
#endif
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &rt_rq->tg->rt_bandwidth;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
#else /* !CONFIG_RT_GROUP_SCHED */
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
return rt_rq->rt_runtime;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 17 | 100.00% | 3 | 100.00% |
Total | 17 | 100.00% | 3 | 100.00% |
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(def_rt_bandwidth.rt_period);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 20 | 100.00% | 3 | 100.00% |
Total | 20 | 100.00% | 3 | 100.00% |
typedef struct rt_rq *rt_rq_iter_t;
#define for_each_rt_rq(rt_rq, iter, rq) \
for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = NULL)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
if (!rt_rq->rt_nr_running)
return;
enqueue_top_rt_rq(rt_rq);
resched_curr(rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill V Tkhai | 17 | 42.50% | 2 | 40.00% |
John Blackwood | 13 | 32.50% | 1 | 20.00% |
Peter Zijlstra | 10 | 25.00% | 2 | 40.00% |
Total | 40 | 100.00% | 5 | 100.00% |
static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
dequeue_top_rt_rq(rt_rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 10 | 58.82% | 2 | 66.67% |
Kirill V Tkhai | 7 | 41.18% | 1 | 33.33% |
Total | 17 | 100.00% | 3 | 100.00% |
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill V Tkhai | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 10 | 66.67% | 1 | 50.00% |
Rusty Russell | 5 | 33.33% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return &cpu_rq(cpu)->rt;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
static inline struct rt_bandwidth *sched_rt_bandwidth(struct