Release 4.15 kernel/sched/deadline.c
// SPDX-License-Identifier: GPL-2.0
/*
* Deadline Scheduling Class (SCHED_DEADLINE)
*
* Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
*
* Tasks that periodically execute their instances for less than their
* runtime won't miss any of their deadlines.
* Tasks that are not periodic or sporadic, or that try to execute more
* than their reserved bandwidth, will be slowed down (and may potentially
* miss some of their deadlines), and won't affect any other task.
*
* Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
* Juri Lelli <juri.lelli@gmail.com>,
* Michael Trimarchi <michael@amarulasolutions.com>,
* Fabio Checconi <fchecconi@gmail.com>
*/
#include "sched.h"
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
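For reference, the runtime/deadline/period parameters described in the header comment are declared from userspace with the sched_setattr() system call. The snippet below is a minimal, illustrative sketch (not part of deadline.c): it assumes a kernel with SCHED_DEADLINE support, declares struct sched_attr locally in the uapi layout because glibc provides no wrapper, and uses arbitrary example values.
/*
 * Illustrative userspace sketch, not part of deadline.c: ask the kernel to
 * reserve 10ms of runtime every 100ms for the calling thread (values are
 * arbitrary). There is no glibc wrapper, so sched_setattr() is invoked via
 * syscall(); struct sched_attr mirrors the uapi layout.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, in ns */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int set_deadline_params(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10ULL * 1000 * 1000,	/*  10 ms */
		.sched_deadline	= 100ULL * 1000 * 1000,	/* 100 ms */
		.sched_period	= 100ULL * 1000 * 1000,	/* 100 ms */
	};

	return syscall(SYS_sched_setattr, 0 /* this thread */, &attr, 0);
}
Admission control (the dl_bw accounting later in this file) may reject such a request with -EBUSY if the declared bandwidth does not fit.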
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
return container_of(dl_se, struct task_struct, dl);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
return container_of(dl_rq, struct rq, dl);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
struct task_struct *p = dl_task_of(dl_se);
struct rq *rq = task_rq(p);
return &rq->dl;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
return !RB_EMPTY_NODE(&dl_se->rb_node);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
return &cpu_rq(i)->rd->dl_bw;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nico Pitre | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
static inline int dl_bw_cpus(int i)
{
struct root_domain *rd = cpu_rq(i)->rd;
int cpus = 0;
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
for_each_cpu_and(i, rd->span, cpu_active_mask)
cpus++;
return cpus;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nico Pitre | 50 | 100.00% | 1 | 100.00% |
Total | 50 | 100.00% | 1 | 100.00% |
#else
static inline struct dl_bw *dl_bw_of(int i)
{
return &cpu_rq(i)->dl.dl_bw;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nico Pitre | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static inline int dl_bw_cpus(int i)
{
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nico Pitre | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
#endif
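Under CONFIG_SMP, deadline bandwidth is accounted per root domain: dl_bw_of() returns the domain-wide struct dl_bw, and dl_bw_cpus() counts the active CPUs the domain spans, so the available capacity is bw * cpus (the actual test lives in __dl_overflow() in kernel/sched/sched.h). The following is a stand-alone sketch of that arithmetic with invented numbers, not kernel code.
/*
 * Stand-alone illustration (not kernel code): with the default 95% cap
 * (sched_rt_runtime_us / sched_rt_period_us) and 4 active CPUs, a root
 * domain can admit deadline tasks until their total bandwidth reaches
 * 0.95 * 4 CPUs, in the same Q20 fixed point used by struct dl_bw.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20

int main(void)
{
	uint64_t bw       = (950000ULL << BW_SHIFT) / 1000000;	/* per-CPU cap, ~0.95 */
	int      cpus     = 4;					/* dl_bw_cpus()        */
	uint64_t total_bw = 3 * ((10ULL << BW_SHIFT) / 100);	/* three 10% tasks     */
	uint64_t new_bw   = (60ULL << BW_SHIFT) / 100;		/* a 60% candidate     */

	if (bw * cpus < total_bw + new_bw)
		printf("rejected: would exceed %d CPUs * 95%%\n", cpus);
	else
		printf("admitted\n");
	return 0;
}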
static inline
void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->running_bw += dl_bw;
SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Luca Abeni | 62 | 100.00% | 2 | 100.00% |
Total | 62 | 100.00% | 2 | 100.00% |
static inline
void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->running_bw -= dl_bw;
SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
if (dl_rq->running_bw > old)
dl_rq->running_bw = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Luca Abeni | 65 | 100.00% | 1 | 100.00% |
Total | 65 | 100.00% | 1 | 100.00% |
static inline
void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->this_bw += dl_bw;
SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Luca Abeni | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
static inline
void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
dl_rq->this_bw -= dl_bw;
SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
if (dl_rq->this_bw > old)
dl_rq->this_bw = 0;
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Luca Abeni | 76 | 100.00% | 1 | 100.00% |
Total | 76 | 100.00% | 1 | 100.00% |
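The dl_bw values summed into running_bw and this_bw are per-task bandwidths in the kernel's fixed-point format: to_ratio(period, runtime) in kernel/sched/core.c computes (runtime << BW_SHIFT) / period with BW_SHIFT = 20, so 1 << 20 represents a full CPU. A stand-alone sketch (not kernel code) with made-up parameters:
/*
 * Stand-alone sketch (not kernel code) of the fixed-point bandwidth format
 * used by running_bw/this_bw above: 1 << BW_SHIFT == 100% of one CPU,
 * mirroring to_ratio() in kernel/sched/core.c.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)

static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
{
	if (period == 0)
		return 0;
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* 10 ms of runtime every 100 ms: dl_bw ~= 0.1 * BW_UNIT. */
	uint64_t dl_bw = to_ratio_sketch(100 * 1000 * 1000ULL, 10 * 1000 * 1000ULL);

	printf("dl_bw = %llu (%.3f CPUs)\n",
	       (unsigned long long)dl_bw, (double)dl_bw / BW_UNIT);
	return 0;
}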
void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
struct rq *rq;
if (task_on_rq_queued(p))
return;
rq = task_rq(p);
if (p->dl.dl_non_contending) {
sub_running_bw(p->dl.dl_bw, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be cancelled, inactive_task_timer()
* will see that dl_non_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
sub_rq_bw(p->dl.dl_bw, &rq->dl);
add_rq_bw(new_bw, &rq->dl);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Luca Abeni | 109 | 100.00% | 2 | 100.00% |
Total | 109 | 100.00% | 2 | 100.00% |
/*
* The utilization of a task cannot be immediately removed from
* the rq active utilization (running_bw) when the task blocks.
* Instead, we have to wait for the so-called "0-lag time".
*
* If a task blocks before the "0-lag time", a timer (the inactive
* timer) is armed, and running_bw is decreased when the timer
* fires.
*
* If the task wakes up again before the inactive timer fires,
* the timer is cancelled, whereas if the task wakes up after the
* inactive timer fired (and running_bw has been decreased) the
* task's utilization has to be added to running_bw again.
* A flag in the deadline scheduling entity (dl_non_contending)
* is used to avoid race conditions between the inactive timer handler
* and task wakeups.
*
* The following diagram shows how running_bw is updated. A task is
* "ACTIVE" when its utilization contributes to running_bw; an
* "ACTIVE contending" task is in the TASK_RUNNING state, while an
* "ACTIVE non contending" task is a blocked task for which the "0-lag time"
* has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
* time already passed, which does not contribute to running_bw anymore.
*                              +------------------+
*             wakeup           |      ACTIVE      |
*          +------------------>+    contending    |
*          | add_running_bw    |                  |
*          |                   +----+------+------+
*          |                        |      ^
*          |                dequeue |      |
* +--------+-------+                |      |
* |                |   t >= 0-lag   |      | wakeup
* |    INACTIVE    |<---------------+      |
* |                | sub_running_bw |      |
* +--------+-------+                |      |
*          ^                        |      |
*          |              t < 0-lag |      |
*          |                        |      |
*          |                        V      |
*          |                   +----+------+------+
*          | sub_running_bw    |      ACTIVE      |
*          +-------------------+                  |
*            inactive timer    |  non contending  |
*               fired          +------------------+
*
* The task_non_contending() function is invoked when a task
* blocks, and checks if the 0-lag time already passed or
* not (in the first case, it directly updates running_bw;
* in the second case, it arms the inactive timer).
*
* The task_contending() function is invoked when a task wakes
* up, and checks if the task is still in the "ACTIVE non contending"
* state or not (in the second case, it updates running_bw).
*/
static void task_non_contending(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
struct hrtimer *timer = &dl_se->inactive_timer;
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
s64 zerolag_time;
/*
* If this is a non-deadline task that has been boosted,
* do nothing
*/
if (dl_se->dl_runtime == 0)
return;
WARN_ON(hrtimer_active(&dl_se->inactive_timer));
WARN_ON(dl_se->dl_non_contending);
zerolag_time = dl_se->deadline -
div64_long((dl_se->runtime * dl_se->dl_period),
dl_se->dl_runtime);
/*
* Using relative times instead of the absolute "0-lag time"
* simplifies the code
*/
zerolag_time -= rq_clock(rq);
/*
* If the "0-lag time" already passed, decrease the active
* utilization now, instead of starting a timer
*/
if (zerolag_time < 0) {
if (dl_task(p))
sub_running_bw(dl_se->dl_bw, dl_rq);
if (!dl_task(p) || p->state == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (p->state == TASK_DEAD)
sub_rq_bw(p->dl.dl_bw, &rq->dl);
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
__dl_clear_params(p);
raw_spin_unlock(&dl_b->lock);
}
return;
}
dl_se->dl_non_contending = 1;
get_task_struct(p);
hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Luca Abeni | 252 | 99.60% | 4 | 80.00% |
Peter Zijlstra | 1 | 0.40% | 1 | 20.00% |
Total | 253 | 100.00% | 5 | 100.00% |
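The zerolag_time computed above is the classic CBS 0-lag time, deadline - runtime * dl_period / dl_runtime: the instant at which the remaining budget, consumed at the reserved rate, would be exactly exhausted. A stand-alone worked example (not kernel code) with invented numbers:
/*
 * Worked example (not kernel code) of the 0-lag computation in
 * task_non_contending(): with dl_runtime = 10ms, dl_period = 100ms and
 * 4ms of runtime left, the task stays "ACTIVE non contending" until
 * 4ms * (100/10) = 40ms before its absolute deadline.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t dl_runtime = 10 * 1000 * 1000LL;   /* 10 ms  */
	int64_t dl_period  = 100 * 1000 * 1000LL;  /* 100 ms */
	int64_t runtime    = 4 * 1000 * 1000LL;    /* 4 ms left */
	int64_t deadline   = 500 * 1000 * 1000LL;  /* absolute, ns */
	int64_t now        = 440 * 1000 * 1000LL;  /* rq_clock()  */

	int64_t zerolag = deadline - (runtime * dl_period) / dl_runtime;

	if (zerolag - now < 0)
		printf("0-lag already passed: decrease running_bw now\n");
	else
		printf("arm inactive_timer for %lld ns\n",
		       (long long)(zerolag - now));
	return 0;
}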
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
/*
* If this is a non-deadline task that has been boosted,
* do nothing
*/
if (dl_se->dl_runtime == 0)
return;
if (flags & ENQUEUE_MIGRATED)
add_rq_bw(dl_se->dl_bw, dl_rq);
if (dl_se->dl_non_contending) {
dl_se->dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be cancelled, inactive_task_timer()
* will see that dl_non_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
put_task_struct(dl_task_of(dl_se));
} else {
/*
* Since "dl_non_contending" is not set, the
* task's utilization has already been removed from
* active utilization (either when the task blocked,
* or when the "inactive timer" fired).
* So, add it back.
*/
add_running_bw(dl_se->dl_bw, dl_rq);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Luca Abeni | 97 | 100.00% | 2 | 100.00% |
Total | 97 | 100.00% | 2 | 100.00% |
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
struct sched_dl_entity *dl_se = &p->dl;
return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 37 | 94.87% | 1 | 50.00% |
Davidlohr Bueso A | 2 | 5.13% | 1 | 50.00% |
Total | 39 | 100.00% | 2 | 100.00% |
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
raw_spin_lock_init(&dl_b->dl_runtime_lock);
dl_b->dl_period = period;
dl_b->dl_runtime = runtime;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
void init_dl_bw(struct dl_bw *dl_b)
{
raw_spin_lock_init(&dl_b->lock);
raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
if (global_rt_runtime() == RUNTIME_INF)
dl_b->bw = -1;
else
dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
dl_b->total_bw = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 65 | 95.59% | 1 | 50.00% |
Peter Zijlstra | 3 | 4.41% | 1 | 50.00% |
Total | 68 | 100.00% | 2 | 100.00% |
void init_dl_rq(struct dl_rq *dl_rq)
{
dl_rq->root = RB_ROOT_CACHED;
#ifdef CONFIG_SMP
/* zero means no -deadline tasks */
dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
dl_rq->dl_nr_migratory = 0;
dl_rq->overloaded = 0;
dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
init_dl_bw(&dl_rq->dl_bw);
#endif
dl_rq->running_bw = 0;
dl_rq->this_bw = 0;
init_dl_rq_bw_ratio(dl_rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 37 | 45.68% | 1 | 14.29% |
Dario Faggioli | 24 | 29.63% | 2 | 28.57% |
Luca Abeni | 17 | 20.99% | 3 | 42.86% |
Davidlohr Bueso A | 3 | 3.70% | 1 | 14.29% |
Total | 81 | 100.00% | 7 | 100.00% |
#ifdef CONFIG_SMP
static inline int dl_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->dlo_count);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 16 | 69.57% | 1 | 50.00% |
Dario Faggioli | 7 | 30.43% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
static inline void dl_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
/*
* Must be visible before the overload count is
* set (as in sched_rt.c).
*
* Matched by the barrier in pull_dl_task().
*/
smp_wmb();
atomic_inc(&rq->rd->dlo_count);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 37 | 78.72% | 1 | 50.00% |
Dario Faggioli | 10 | 21.28% | 1 | 50.00% |
Total | 47 | 100.00% | 2 | 100.00% |
static inline void dl_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
atomic_dec(&rq->rd->dlo_count);
cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 22 | 51.16% | 1 | 50.00% |
Dario Faggioli | 21 | 48.84% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
static void update_dl_migration(struct dl_rq *dl_rq)
{
if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
if (!dl_rq->overloaded) {
dl_set_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 1;
}
} else if (dl_rq->overloaded) {
dl_clear_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 39 | 54.93% | 1 | 33.33% |
Dario Faggioli | 31 | 43.66% | 1 | 33.33% |
Kirill V Tkhai | 1 | 1.41% | 1 | 33.33% |
Total | 71 | 100.00% | 3 | 100.00% |
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory++;
update_dl_migration(dl_rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 27 | 61.36% | 1 | 33.33% |
Dario Faggioli | 15 | 34.09% | 1 | 33.33% |
Ingo Molnar | 2 | 4.55% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory--;
update_dl_migration(dl_rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dario Faggioli | 23 | 52.27% | 1 | 33.33% |
Juri Lelli | 19 | 43.18% | 1 | 33.33% |
Ingo Molnar | 2 | 4.55% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
/*
* The list of pushable -deadline tasks is not a plist, like in
* sched_rt.c; it is an rb-tree with tasks ordered by deadline.
*/
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
struct rb_node *parent = NULL;
struct task_struct *entry;
bool leftmost = true;
BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
while (*link) {
parent = *link;
entry = rb_entry(parent, struct task_struct,
pushable_dl_tasks);
if (dl_entity_preempt(&p->dl, &entry->dl))
link = &parent->rb_left;
else {
link = &parent->rb_right;
leftmost = false;
}
}
if (leftmost)
dl_rq->earliest_dl.next = p->dl.deadline;
rb_link_node(&p->pushable_dl_tasks, parent, link);
rb_insert_color_cached(&p->pushable_dl_tasks,
&dl_rq->pushable_dl_tasks_root, leftmost);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 91 | 52.60% | 1 | 25.00% |
Dario Faggioli | 62 | 35.84% | 1 | 25.00% |
Wanpeng Li | 12 | 6.94% | 1 | 25.00% |
Davidlohr Bueso A | 8 | 4.62% | 1 | 25.00% |
Total | 173 | 100.00% | 4 | 100.00% |
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
return;
if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
struct rb_node *next_node;
next_node = rb_next(&p->pushable_dl_tasks);
if (next_node) {
dl_rq->earliest_dl.next = rb_entry(next_node,
struct task_struct, pushable_dl_tasks)->dl.deadline;
}
}
rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 46 | 40.35% | 1 | 25.00% |
Dario Faggioli | 38 | 33.33% | 1 | 25.00% |
Wanpeng Li | 26 | 22.81% | 1 | 25.00% |
Davidlohr Bueso A | 4 | 3.51% | 1 | 25.00% |
Total | 114 | 100.00% | 4 | 100.00% |
static inline int has_pushable_dl_tasks(struct rq *rq)
{
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 17 | 65.38% | 1 | 33.33% |
Dario Faggioli | 7 | 26.92% | 1 | 33.33% |
Davidlohr Bueso A | 2 | 7.69% | 1 | 33.33% |
Total | 26 | 100.00% | 3 | 100.00% |
static int push_dl_task(struct rq *rq);
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
return dl_task(prev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);
static inline void queue_push_tasks(struct rq *rq)
{
if (!has_pushable_dl_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 38 | 100.00% | 3 | 100.00% |
Total | 38 | 100.00% | 3 | 100.00% |
static inline void queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 29 | 100.00% | 2 | 100.00% |
Total | 29 | 100.00% | 2 | 100.00% |
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
struct rq *later_rq = NULL;
later_rq = find_lock_later_rq(p, rq);
if (!later_rq) {
int cpu;
/*
* If we cannot preempt any rq, fall back to pick any
* online cpu.
*/
cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable cpu.
* The task will never come back!
*/
BUG_ON(dl_bandwidth_enabled());
/*
* If admission control is disabled we
* try a little harder to let the task
* run.
*/
cpu = cpumask_any(cpu_active_mask);
}
later_rq = cpu_rq(cpu);
double_lock_balance(rq, later_rq);
}
set_task_cpu(p, later_rq->cpu);
double_unlock_balance(later_rq, rq);
return later_rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wanpeng Li | 102 | 90.27% | 1 | 33.33% |
Peter Zijlstra | 8 | 7.08% | 1 | 33.33% |
Ingo Molnar | 3 | 2.65% | 1 | 33.33% |
Total | 113 | 100.00% | 3 | 100.00% |
#else
static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 10 | 62.50% | 1 | 50.00% |
Dario Faggioli | 6 | 37.50% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 8 | 50.00% | 1 | 50.00% |
Dario Faggioli | 8 | 50.00% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 12 | 75.00% | 1 | 50.00% |
Dario Faggioli | 4 | 25.00% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 15 | 93.75% | 1 | 50.00% |
Dario Faggioli | 1 | 6.25% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static inline void pull_dl_task(struct rq *rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 11 | 100.00% | 2 | 100.00% |
Total | 11 | 100.00% | 2 | 100.00% |
static inline void queue_push_tasks(struct rq *rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 11 | 100.00% | 2 | 100.00% |
Total | 11 | 100.00% | 2 | 100.00% |
static inline void queue_pull_task(struct rq *rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 11 | 100.00% | 2 | 100.00% |
Total | 11 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
int flags);
/*
* We are being explicitly informed that a new instance is starting,
* and this means that:
* - the absolute deadline of the entity has to be placed at
* current time + relative deadline;
* - the runtime of the entity has to be set to the maximum value.
*
* The ability to specify such an event is useful whenever a -deadline
* entity wants to (try to!) synchronize its behaviour with the scheduler's,
* and to (try to!) reconcile itself with its own scheduling parameters.
*/
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
WARN_ON(dl_se->dl_boosted);
WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
/*
* We are racing with the deadline timer. So, do nothing because
* the deadline timer handler will take care of properly recharging
* the runtime and postponing the deadline
*/
if (dl_se->dl_throttled)
return;
/*
* We use the regular wall clock time to set deadlines in the
* future; in fact, we must consider execution overheads (time
* spent on hardirq context, etc.).
*/
dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
dl_se->runtime = dl_se->dl_runtime;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 68 | 80.95% | 2 | 66.67% |
Luca Abeni | 16 | 19.05% | 1 | 33.33% |
Total | 84 | 100.00% | 3 | 100.00% |
/*
* Pure Earliest Deadline First (EDF) scheduling does not deal with the
* possibility of an entity lasting longer than it declared, and thus
* exhausting its runtime.
*
* Here we are interested in making runtime overruns possible, but we do
* not want a misbehaving entity to affect the scheduling of all
* other entities.
* Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
* is used, in order to confine each entity within its own bandwidth.
*
* This function deals exactly with that, and ensures that when the runtime
* of an entity is replenished, its deadline is also postponed. That ensures
* the overrunning entity can't interfere with other entities in the system and
* can't make them miss their deadlines. Typical reasons for this kind of
* overrun are an entity voluntarily trying to exceed its runtime, or having
* underestimated it during sched_setattr().
*/
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
BUG_ON(pi_se->dl_runtime <= 0);
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
*/
if (dl_se->dl_deadline == 0) {
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se->runtime = 0;
/*
* We keep moving the deadline away until we get some
* available runtime for the entity. This ensures correct
* handling of situations where the runtime overrun is
* arbitrarily large.
*/
while (dl_se->runtime <= 0) {
dl_se->deadline += pi_se->dl_period;
dl_se->runtime += pi_se->dl_runtime;
}
/*
* At this point, the deadline really should be "in
* the future" with respect to rq->clock. If it's
* not, we are, for some reason, lagging too much!
* Anyway, after having warned userspace about that,
* we still try to keep things running by
* resetting the deadline and the budget of the
* entity.
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
if (dl_se->dl_yielded)
dl_se->dl_yielded = 0;
if (dl_se->dl_throttled)
dl_se->dl_throttled = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 88 | 46.56% | 1 | 12.50% |
Dario Faggioli | 56 | 29.63% | 2 | 25.00% |
Peter Zijlstra | 42 | 22.22% | 2 | 25.00% |
John Stultz | 1 | 0.53% | 1 | 12.50% |
Harald Gustafsson | 1 | 0.53% | 1 | 12.50% |
Steven Rostedt | 1 | 0.53% | 1 | 12.50% |
Total | 189 | 100.00% | 8 | 100.00% |
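The while loop in replenish_dl_entity() implements the CBS postponement rule: every time the budget is found exhausted, the deadline is moved one relative period forward and one maximum runtime is added, until the budget becomes positive again. A stand-alone rendering (not kernel code) with invented numbers:
/*
 * Stand-alone rendering (not kernel code) of the CBS replenishment loop in
 * replenish_dl_entity(): runtime = -25ms, dl_runtime = 10ms and
 * dl_period = 100ms need three postponements before the budget is positive.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t  runtime    = -25 * 1000 * 1000LL;	/* overrun carried over */
	uint64_t deadline   = 500 * 1000 * 1000ULL;	/* absolute, ns */
	uint64_t dl_runtime = 10 * 1000 * 1000ULL;
	uint64_t dl_period  = 100 * 1000 * 1000ULL;

	while (runtime <= 0) {
		deadline += dl_period;
		runtime  += dl_runtime;
	}
	/* deadline is now 800ms, runtime 5ms */
	printf("deadline=%llu ns, runtime=%lld ns\n",
	       (unsigned long long)deadline, (long long)runtime);
	return 0;
}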
/*
* Here we check if --at time t-- an entity (which is probably being
* [re]activated or, in general, enqueued) can use its remaining runtime
* and its current deadline _without_ exceeding the bandwidth it is
* assigned (function returns true if it can't). We are in fact applying
* one of the CBS rules: when a task wakes up, if the residual runtime
* over residual deadline fits within the allocated bandwidth, then we
* can keep the current (absolute) deadline and residual budget without
* disrupting the schedulability of the system. Otherwise, we should
* refill the runtime and set the deadline a period in the future,
* because keeping the current (absolute) deadline of the task would
* result in breaking guarantees promised to other tasks (refer to
* Documentation/scheduler/sched-deadline.txt for more information).
*
* This function returns true if:
*
* runtime / (deadline - t) > dl_runtime / dl_deadline ,
*
* IOW we can't recycle current parameters.
*
* Notice that the bandwidth check is done against the deadline. For
* a task with deadline equal to its period, this is the same as using
* dl_period instead of dl_deadline in the equation above.
*/
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se, u64 t)
{
u64 left, right;
/*
* left and right are the two sides of the equation above,
* after a bit of shuffling to use multiplications instead
* of divisions.
*
* Note that none of the time values involved in the two
* multiplications are absolute: dl_deadline and dl_runtime
* are the relative deadline and the maximum runtime of each
* instance, runtime is the runtime left for the last instance
* and (deadline - t), since t is rq->clock, is the time left
* to the (absolute) deadline. Even if overflowing the u64 type
* is very unlikely to occur in both cases, here we scale down
* as we want to avoid that risk entirely. Scaling down by 10 bits
* means that we reduce the granularity to 1us. We are fine with it,
* since this is only a true/false check and, anyway, thinking
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
(pi_se->dl_runtime >> DL_SCALE);
return dl_time_before(right, left);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Juri Lelli | 61 | 83.56% | 1 | 25.00% |
Dario Faggioli | 11 | 15.07% | 2 | 50.00% |
Steven Rostedt | 1 | 1.37% | 1 | 25.00% |
Total | 73 | 100.00% | 4 | 100.00% |
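The check above is the cross-multiplied form of runtime / (deadline - t) > dl_runtime / dl_deadline, with every operand shifted right by DL_SCALE (10 bits, roughly 1us granularity) so the products stay well inside 64 bits. A stand-alone worked example (not kernel code, using a plain comparison instead of dl_time_before()):
/*
 * Stand-alone worked example (not kernel code) of the cross-multiplied
 * check in dl_entity_overflow(). DL_SCALE is 10 in kernel/sched/sched.h.
 */
#include <stdint.h>
#include <stdio.h>

#define DL_SCALE	10

int main(void)
{
	uint64_t dl_runtime  = 10 * 1000 * 1000ULL;	/* 10 ms  */
	uint64_t dl_deadline = 100 * 1000 * 1000ULL;	/* 100 ms */
	uint64_t runtime     = 8 * 1000 * 1000ULL;	/* 8 ms left */
	uint64_t deadline    = 500 * 1000 * 1000ULL;	/* absolute  */
	uint64_t t           = 460 * 1000 * 1000ULL;	/* rq_clock() */

	/* left ~ dl_deadline * runtime, right ~ (deadline - t) * dl_runtime */
	uint64_t left  = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = ((deadline - t) >> DL_SCALE) * (dl_runtime >> DL_SCALE);

	/* 8ms over 40ms of laxity is 20% > the declared 10%: overflow. */
	printf("%s\n", right < left ? "overflow: refill and push deadline"
				    : "reuse current runtime and deadline");
	return 0;
}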
/*
* Revised wakeup rule [1]: For self-suspending tasks, rather than
* re-initializing the task's runtime and deadline, the revised wakeup
* rule adjusts the task's runtime so that the task does not overrun its
* density.
*
* Reasoning: a task may overrun the density if:
* runtime / (deadline - t) > dl_runtime / dl_deadline
*
* Therefore, runtime can be adjusted to:
* runtime = (dl_runtime / dl_deadline) * (deadline - t)
*
* In this way, runtime will be the maximum amount the task can use
* without breaking any rule (i.e., without exceeding its density).
*
* [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
* bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
*/
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
u64 laxity = dl_se->deadline - rq_clock(rq);
/*
* If the task has deadline < period, and the deadline is in the past,
* it should already be throttled before this check.
*
* See update_dl_entity() comments for further details.
*/
WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Bristot de Oliveira | 58 | 100.00% | 1 | 100.00% |
Total | 58 | 100.00% | 1 | 100.00% |
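dl_density is dl_runtime / dl_deadline stored in the same BW_SHIFT (Q20) fixed point as the bandwidth values, so the adjusted runtime above is simply density * laxity scaled back down. A stand-alone worked example (not kernel code) with invented numbers:
/*
 * Stand-alone worked example (not kernel code) of the revised wakeup rule
 * in update_dl_revised_wakeup(): density = dl_runtime/dl_deadline = 0.1
 * (in Q20), laxity = 40ms, so the refilled runtime is about 4ms.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20

int main(void)
{
	uint64_t dl_runtime  = 10 * 1000 * 1000ULL;	/* 10 ms  */
	uint64_t dl_deadline = 100 * 1000 * 1000ULL;	/* 100 ms */
	uint64_t laxity      = 40 * 1000 * 1000ULL;	/* deadline - rq_clock() */

	uint64_t dl_density  = (dl_runtime << BW_SHIFT) / dl_deadline;
	uint64_t runtime     = (dl_density * laxity) >> BW_SHIFT;

	printf("adjusted runtime = %llu ns\n", (unsigned long long)runtime);
	return 0;
}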
/*
* Regarding the deadline, a task with implicit deadline has a relative
* deadline == relative period. A task with constrained deadline has a
* relative deadline <= relative period.
*
* We support constrained deadline tasks. However, there are some restrictions
* applied only for tasks which do not have an implicit deadline. See
* update_dl_entity() to know more about such restrictions.
*
* dl_is_implicit() returns true if the task has an implicit deadline.
*/