cregit-Linux: how code gets into the kernel

Release 4.15: kernel/sched/rt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>
#include <linux/irq_work.h>


int sched_rr_timeslice = RR_TIMESLICE;

int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
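The sysctl exposes the timeslice in milliseconds rather than jiffies: RR_TIMESLICE is 100 ms worth of jiffies (100 * HZ / 1000), and multiplying by MSEC_PER_SEC / HZ (milliseconds per jiffy) converts it back, so userspace sees a tick-rate-independent value. A minimal userspace sketch of the arithmetic, assuming a hypothetical HZ of 250:

#include <stdio.h>

#define HZ 250                          /* assumed tick rate for this example */
#define MSEC_PER_SEC 1000
#define RR_TIMESLICE (100 * HZ / 1000)  /* 100 ms expressed in jiffies */

int main(void)
{
        /* (MSEC_PER_SEC / HZ) is ms per jiffy, so the product is in ms. */
        printf("%d jiffies -> %d ms\n",
               RR_TIMESLICE, (MSEC_PER_SEC / HZ) * RR_TIMESLICE);
        return 0;
}

With HZ = 250 this prints "25 jiffies -> 100 ms"; the reported value stays 100 ms for any HZ that divides evenly.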

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);


struct rt_bandwidth def_rt_bandwidth;


static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        int idle = 0;
        int overrun;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                if (!overrun)
                        break;

                raw_spin_unlock(&rt_b->rt_runtime_lock);
                idle = do_sched_rt_period_timer(rt_b, overrun);
                raw_spin_lock(&rt_b->rt_runtime_lock);
        }
        if (idle)
                rt_b->rt_period_active = 0;
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    117     100.00%  3        100.00%
Total             117     100.00%  3        100.00%
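The loop in sched_rt_period_timer() above is the standard shape for a periodic hrtimer: hrtimer_forward_now() pushes the expiry forward by whole periods and returns how many periods elapsed, the runtime lock is dropped around the work because do_sched_rt_period_timer() takes other locks, and the timer re-arms only while throttling work remains. A stripped-down sketch of the same pattern; the demo_* names are hypothetical:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static ktime_t demo_period;     /* hypothetical period, set before arming */

/* Hypothetical per-period worker; returns nonzero once no work is left. */
static int demo_do_period(int overrun);

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
        int idle = 0;
        int overrun;

        for (;;) {
                /* Advance the expiry by whole periods; 0 means not expired yet. */
                overrun = hrtimer_forward_now(timer, demo_period);
                if (!overrun)
                        break;
                idle = demo_do_period(overrun);
        }

        /* Keep firing only while the last pass still found work. */
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}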


void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    59      100.00%  1        100.00%
Total             59      100.00%  1        100.00%


static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        if (!rt_b->rt_period_active) {
                rt_b->rt_period_active = 1;
                /*
                 * SCHED_DEADLINE updates the bandwidth, as a runaway
                 * RT task with a DL task could hog a CPU. But DL does
                 * not reset the period. If a deadline task was running
                 * without an RT task running, it can cause RT tasks to
                 * throttle when they start up. Kick the timer right away
                 * to update the period.
                 */
                hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
                hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    74      93.67%   2        66.67%
Steven Rostedt    5       6.33%    1        33.33%
Total             79      100.00%  3        100.00%


void init_rt_rq(struct rt_rq *rt_rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
        /* We start in dequeued state, because no RT tasks are queued */
        rt_rq->rt_queued = 0;

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    134     93.06%   1        33.33%
Kirill V Tkhai    7       4.86%    1        33.33%
Steven Rostedt    3       2.08%    1        33.33%
Total             144     100.00%  3        100.00%
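The __set_bit(MAX_RT_PRIO, ...) call above plants a sentinel: a find-first-bit scan over the priority bitmap always terminates, and a result of MAX_RT_PRIO means "no RT task queued". A userspace illustration, where first_set() is a hypothetical stand-in for the kernel's bit search:

#include <stdio.h>
#include <string.h>

#define MAX_RT_PRIO 100

/* Hypothetical stand-in for the kernel's priority-bitmap search. */
static int first_set(const unsigned char *bits)
{
        for (int i = 0; i <= MAX_RT_PRIO; i++)
                if (bits[i / 8] & (1u << (i % 8)))
                        return i;
        return -1;      /* unreachable while the delimiter bit is set */
}

int main(void)
{
        unsigned char bitmap[MAX_RT_PRIO / 8 + 1];

        memset(bitmap, 0, sizeof(bitmap));
        bitmap[MAX_RT_PRIO / 8] |= 1u << (MAX_RT_PRIO % 8);     /* delimiter */

        printf("empty rq: %d\n", first_set(bitmap));            /* 100 == MAX_RT_PRIO */

        bitmap[42 / 8] |= 1u << (42 % 8);       /* queue a prio-42 task */
        printf("prio 42 queued: %d\n", first_set(bitmap));      /* 42 */
        return 0;
}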

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) { hrtimer_cancel(&rt_b->rt_period_timer); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    19      100.00%  1        100.00%
Total             19      100.00%  1        100.00%

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    39      100.00%  1        100.00%
Total             39      100.00%  1        100.00%


static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { return rt_rq->rq; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    19      100.00%  1        100.00%
Total             19      100.00%  1        100.00%


static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) { return rt_se->rt_rq; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    19      100.00%  1        100.00%
Total             19      100.00%  1        100.00%


static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_se->rt_rq;

        return rt_rq->rq;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Kirill V Tkhai    28      100.00%  1        100.00%
Total             28      100.00%  1        100.00%


void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    79      100.00%  1        100.00%
Total             79      100.00%  1        100.00%


void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    131     100.00%  2        100.00%
Total             131     100.00%  2        100.00%


int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq);
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    184     90.64%   2        66.67%
Gregory Haskins   19      9.36%    1        33.33%
Total             203     100.00%  3        100.00%
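alloc_rt_sched_group() follows the kernel's goto-unwind convention: a failure jumps to a label that frees only what the failing iteration had already allocated, and the caller is expected to invoke free_rt_sched_group(), which copes with the partially filled arrays because kzalloc() zeroes them. A minimal userspace sketch of the idiom, with hypothetical names:

#include <stdlib.h>

struct demo {                   /* hypothetical two-allocation object */
        int *a;
        int *b;
};

/* Returns 1 on success, 0 on failure; each label unwinds only what
 * this function had already allocated, mirroring err_free_rq/err above. */
static int demo_alloc(struct demo *d)
{
        d->a = calloc(1, sizeof(*d->a));
        if (!d->a)
                goto err;

        d->b = calloc(1, sizeof(*d->b));
        if (!d->b)
                goto err_free_a;

        return 1;

err_free_a:
        free(d->a);
        d->a = NULL;
err:
        return 0;
}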

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) { return container_of(rt_se, struct task_struct, rt); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    25      100.00%  1        100.00%
Total             25      100.00%  1        100.00%


static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { return container_of(rt_rq, struct rq, rt); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   25      100.00%  1        100.00%
Total             25      100.00%  1        100.00%


static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);

        return task_rq(p);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   21      70.00%   1        50.00%
Kirill V Tkhai    9       30.00%   1        50.00%
Total             30      100.00%  2        100.00%


static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct rq *rq = rq_of_rt_se(rt_se);

        return &rq->rt;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   15      50.00%   1        50.00%
Kirill V Tkhai    15      50.00%   1        50.00%
Total             30      100.00%  2        100.00%


void free_rt_sched_group(struct task_group *tg) { }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    9       100.00%  1        100.00%
Total             9       100.00%  1        100.00%


int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) { return 1; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    18      100.00%  1        100.00%
Total             18      100.00%  1        100.00%

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        return rq->rt.highest_prio.curr > prev->prio;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    31      100.00%  1        100.00%
Total             31      100.00%  1        100.00%


static inline int rt_overloaded(struct rq *rq) { return atomic_read(&rq->rd->rto_count); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Steven Rostedt    15      65.22%   1        50.00%
Gregory Haskins   8       34.78%   1        50.00%
Total             23      100.00%  2        100.00%


static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         *
         * Matched by the barrier in pull_rt_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->rto_count);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Steven Rostedt    27      57.45%   1        20.00%
Gregory Haskins   17      36.17%   2        40.00%
Peter Zijlstra    2       4.26%    1        20.00%
Rusty Russell     1       2.13%    1        20.00%
Total             47      100.00%  5        100.00%
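The smp_wmb() in rt_set_overload() publishes in two steps: the rto_mask bit is made visible before rto_count is bumped, and the reader in pull_rt_task() checks the count behind a matching read barrier before trusting the mask. The same publish/observe shape in portable C11 atomics; all demo_* names are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool demo_mask_bit;       /* stands in for the rto_mask bit */
static atomic_int  demo_count;          /* stands in for rto_count */

static void demo_set_overload(void)
{
        atomic_store_explicit(&demo_mask_bit, true, memory_order_relaxed);
        /* Release: the mask store cannot be reordered after the count store. */
        atomic_fetch_add_explicit(&demo_count, 1, memory_order_release);
}

static bool demo_overloaded(void)
{
        /* Acquire pairs with the release above: a nonzero count implies
         * the mask bit written before it is visible to this CPU. */
        if (atomic_load_explicit(&demo_count, memory_order_acquire) == 0)
                return false;
        return atomic_load_explicit(&demo_mask_bit, memory_order_relaxed);
}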


static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Steven Rostedt    24      54.55%   1        20.00%
Gregory Haskins   19      43.18%   3        60.00%
Rusty Russell     1       2.27%    1        20.00%
Total             44      100.00%  5        100.00%


static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   70      98.59%   4        80.00%
Peter Zijlstra    1       1.41%    1        20.00%
Total             71      100.00%  5        100.00%


static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    47      67.14%   3        50.00%
Gregory Haskins   21      30.00%   2        33.33%
Ingo Molnar       2       2.86%    1        16.67%
Total             70      100.00%  6        100.00%


static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    37      52.86%   2        50.00%
Gregory Haskins   31      44.29%   1        25.00%
Ingo Molnar       2       2.86%    1        25.00%
Total             70      100.00%  4        100.00%


static inline int has_pushable_tasks(struct rq *rq) { return !plist_head_empty(&rq->rt.pushable_tasks); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Steven Rostedt    24      100.00%  1        100.00%
Total             24      100.00%  1        100.00%

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);
static inline void queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    38      100.00%  3        100.00%
Total             38      100.00%  3        100.00%


static inline void queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    29      100.00%  1        100.00%
Total             29      100.00%  1        100.00%


static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   58      68.24%   2        66.67%
Steven Rostedt    27      31.76%   1        33.33%
Total             85      100.00%  3        100.00%
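pushable_tasks is a plist, a list kept sorted by priority, so after enqueue_pushable_task() the head is always the highest-priority pushable task and the del/init/add sequence re-sorts a task whose priority changed. A hedged kernel-style sketch of the same plist calls, using hypothetical demo types and assuming the node was plist_node_init()'d when the task was created, as the kernel does for pushable_tasks:

#include <linux/plist.h>

struct demo_task {              /* hypothetical stand-in for task_struct */
        int prio;               /* lower value == higher priority */
        struct plist_node node; /* stand-in for p->pushable_tasks */
};

static PLIST_HEAD(demo_head);   /* stand-in for rq->rt.pushable_tasks */

static void demo_requeue(struct demo_task *t)
{
        /* Same del/init/add dance as enqueue_pushable_task(): remove the
         * node, reset its key, re-insert where its prio now dictates. */
        plist_del(&t->node, &demo_head);
        plist_node_init(&t->node, t->prio);
        plist_add(&t->node, &demo_head);
}

static struct demo_task *demo_highest(void)
{
        if (plist_head_empty(&demo_head))
                return NULL;
        /* plist keeps nodes sorted, so the head is the best candidate. */
        return plist_first_entry(&demo_head, struct demo_task, node);
}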


static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Steven Rostedt    39      48.15%   1        33.33%
Gregory Haskins   30      37.04%   1        33.33%
Ingo Molnar       12      14.81%   1        33.33%
Total             81      100.00%  3        100.00%

#else
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) { }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   15      93.75%   2        66.67%
Peter Zijlstra    1       6.25%    1        33.33%
Total             16      100.00%  3        100.00%


static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) { }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   15      93.75%   2        66.67%
Peter Zijlstra    1       6.25%    1        33.33%
Total             16      100.00%  3        100.00%


static inline void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   11      68.75%   1        33.33%
Peter Zijlstra    5       31.25%   2        66.67%
Total             16      100.00%  3        100.00%


static inline void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Gregory Haskins   9       56.25%   1        33.33%
Peter Zijlstra    7       43.75%   2        66.67%
Total             16      100.00%  3        100.00%


static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) { return false; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    20      100.00%  1        100.00%
Total             20      100.00%  1        100.00%


static inline void pull_rt_task(struct rq *this_rq) { }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    11      100.00%  2        100.00%
Total             11      100.00%  2        100.00%


static inline void queue_push_tasks(struct rq *rq) { }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    11      100.00%  2        100.00%
Total             11      100.00%  2        100.00%

#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
static inline int on_rt_rq(struct sched_rt_entity *rt_se) { return rt_se->on_rq; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    17      100.00%  2        100.00%
Total             17      100.00%  2        100.00%

#ifdef CONFIG_RT_GROUP_SCHED
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    27      100.00%  3        100.00%
Total             27      100.00%  3        100.00%


static inline u64 sched_rt_period(struct rt_rq *rt_rq) { return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    24      100.00%  3        100.00%
Total             24      100.00%  3        100.00%

typedef struct task_group *rt_rq_iter_t;
static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Yong Zhang        69      100.00%  1        100.00%
Total             69      100.00%  1        100.00%

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);   \
                (iter = next_task_group(iter)) &&                       \
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) { return rt_se->my_q; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    19      100.00%  1        100.00%
Total             19      100.00%  1        100.00%

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct rq *rq = rq_of_rt_rq(rt_rq);
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq);

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (!rt_se)
                        enqueue_top_rt_rq(rt_rq);
                else if (!on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, 0);

                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_curr(rq);
        }
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    47      43.52%   4        36.36%
Kirill V Tkhai    23      21.30%   2        18.18%
Dario Faggioli    19      17.59%   1        9.09%
Yong Zhang        8       7.41%    1        9.09%
Balbir Singh      8       7.41%    1        9.09%
Gregory Haskins   2       1.85%    1        9.09%
Thomas Gleixner   1       0.93%    1        9.09%
Total             108     100.00%  11       100.00%


static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (!rt_se)
                dequeue_top_rt_rq(rt_rq);
        else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se, 0);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    33      52.38%   3        50.00%
Balbir Singh      12      19.05%   1        16.67%
Kirill V Tkhai    10      15.87%   1        16.67%
Yong Zhang        8       12.70%   1        16.67%
Total             63      100.00%  6        100.00%


static inline int rt_rq_throttled(struct rt_rq *rt_rq) { return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Kirill V Tkhai    22      100.00%  1        100.00%
Total             22      100.00%  1        100.00%


static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    53      100.00%  1        100.00%
Total             53      100.00%  1        100.00%

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void) { return this_rq()->rd->span; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    15      75.00%   1        33.33%
Rusty Russell     4       20.00%   1        33.33%
Nathan Zimmer     1       5.00%    1        33.33%
Total             20      100.00%  3        100.00%

#else
static inline const struct cpumask *sched_rt_period_mask(void) { return cpu_online_mask; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    10      66.67%   1        50.00%
Rusty Russell     5       33.33%   1        50.00%
Total             15      100.00%  2        100.00%

#endif
static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) { return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    33      100.00%  1        100.00%
Total             33      100.00%  1        100.00%


static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) { return &rt_rq->tg->rt_bandwidth; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    22      100.00%  1        100.00%
Total             22      100.00%  1        100.00%

#else /* !CONFIG_RT_GROUP_SCHED */
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) { return rt_rq->rt_runtime; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    17      100.00%  3        100.00%
Total             17      100.00%  3        100.00%


static inline u64 sched_rt_period(struct rt_rq *rt_rq) { return ktime_to_ns(def_rt_bandwidth.rt_period); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    20      100.00%  3        100.00%
Total             20      100.00%  3        100.00%

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) { return NULL; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    17      100.00%  1        100.00%
Total             17      100.00%  1        100.00%


static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (!rt_rq->rt_nr_running)
                return;

        enqueue_top_rt_rq(rt_rq);
        resched_curr(rq);
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Kirill V Tkhai    17      42.50%   2        40.00%
John Blackwood    13      32.50%   1        20.00%
Peter Zijlstra    10      25.00%   2        40.00%
Total             40      100.00%  5        100.00%


static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) { dequeue_top_rt_rq(rt_rq); }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    10      58.82%   2        66.67%
Kirill V Tkhai    7       41.18%   1        33.33%
Total             17      100.00%  3        100.00%


static inline int rt_rq_throttled(struct rt_rq *rt_rq) { return rt_rq->rt_throttled; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Kirill V Tkhai    17      100.00%  1        100.00%
Total             17      100.00%  1        100.00%


static inline const struct cpumask *sched_rt_period_mask(void) { return cpu_online_mask; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    10      66.67%   1        50.00%
Rusty Russell     5       33.33%   1        50.00%
Total             15      100.00%  2        100.00%


static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) { return &cpu_rq(cpu)->rt; }

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Peter Zijlstra    26      100.00%  1        100.00%
Total             26      100.00%  1        100.00%


static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}