cregit-Linux: how code gets into the kernel

Release 4.15: kernel/sched/core.c (directory: kernel/sched)
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/hotplug.h>
#include <linux/wait_bit.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/context_tracking.h>
#include <linux/rcupdate_wait.h>
#include <linux/compat.h>

#include <linux/blkdev.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/sched/isolation.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"


#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 * at compile time and compiler optimization based on the features' defaults.
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT
#endif
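
The trick above is that every SCHED_FEAT(name, enabled) line in features.h expands to one term of a bitwise OR, so including the file builds a single constant with a bit set for each feature whose default is enabled. The following standalone sketch (not kernel code; the FEAT()/FEATURES names are hypothetical stand-ins for SCHED_FEAT() and features.h) shows the same expansion pattern:

/* Minimal standalone sketch of the SCHED_FEAT() expansion trick (not kernel code).
 * Feature names and the FEATURES list are hypothetical stand-ins for features.h. */
#include <stdio.h>

enum { __FEAT_A, __FEAT_B, __FEAT_C };

/* Stand-in for features.h: each line is one SCHED_FEAT(name, enabled) entry. */
#define FEATURES	\
	FEAT(A, 1)	\
	FEAT(B, 0)	\
	FEAT(C, 1)

/* Same shape as the kernel's SCHED_FEAT(): each entry becomes one OR term. */
#define FEAT(name, enabled) (1UL << __FEAT_##name) * (enabled) |

static const unsigned long features = FEATURES 0;

#undef FEAT

int main(void)
{
	/* bits A and C are set, B is not: prints 0x5 */
	printf("0x%lx\n", features);
	return 0;
}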

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */

const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */

const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */

unsigned int sysctl_sched_rt_period = 1000000;


__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */

int sysctl_sched_rt_runtime = 950000;

/*
 * __task_rq_lock - lock the rq @p resides on.
 */

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 106 | 97.25% | 2 | 66.67%
Matt Fleming | 3 | 2.75% | 1 | 33.33%
Total | 109 | 100.00% | 3 | 100.00%

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock, the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock, the acquire will
		 * pair with the WMB to ensure we must then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 87 | 65.91% | 2 | 40.00%
Ingo Molnar | 44 | 33.33% | 2 | 40.00%
Matt Fleming | 1 | 0.76% | 1 | 20.00%
Total | 132 | 100.00% | 5 | 100.00%
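
A sketch of the usual caller pattern for the two helpers above, assuming the in-kernel context of this file; the matching task_rq_unlock() helper lives in kernel/sched/sched.h, and the example function name is hypothetical:

/*
 * Sketch of the typical caller pattern (not taken from this file);
 * example_inspect_task() is a hypothetical caller.
 */
static void example_inspect_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* takes p->pi_lock and rq->lock */
	update_rq_clock(rq);
	/* ... p cannot migrate to another rq while both locks are held ... */
	task_rq_unlock(rq, p, &rf);	/* drops both locks, restores IRQ flags */
}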

/*
 * RQ-clock updating methods:
 */
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compile should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Ingo Molnar | 174 | 100.00% | 1 | 100.00%
Total | 174 | 100.00% | 1 | 100.00%


void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Ingo Molnar | 57 | 62.64% | 1 | 25.00%
Peter Zijlstra | 34 | 37.36% | 3 | 75.00%
Total | 91 | 100.00% | 4 | 100.00%

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */
static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 26 | 89.66% | 3 | 50.00%
Gregory Haskins | 1 | 3.45% | 1 | 16.67%
Linus Torvalds | 1 | 3.45% | 1 | 16.67%
Steven Rostedt | 1 | 3.45% | 1 | 16.67%
Total | 29 | 100.00% | 6 | 100.00%

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 65 | 78.31% | 7 | 50.00%
Paul Turner | 9 | 10.84% | 1 | 7.14%
Mel Gorman | 3 | 3.61% | 1 | 7.14%
Peter Williams | 2 | 2.41% | 1 | 7.14%
Mike Galbraith | 1 | 1.20% | 1 | 7.14%
Ingo Molnar | 1 | 1.20% | 1 | 7.14%
Nikhil P Rao | 1 | 1.20% | 1 | 7.14%
Rik Van Riel | 1 | 1.20% | 1 | 7.14%
Total | 83 | 100.00% | 14 | 100.00%

#ifdef CONFIG_SMP
static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 12 | 42.86% | 1 | 20.00%
Mel Gorman | 8 | 28.57% | 1 | 20.00%
Jason Baron | 5 | 17.86% | 1 | 20.00%
Thomas Gleixner | 2 | 7.14% | 1 | 20.00%
Paul Turner | 1 | 3.57% | 1 | 20.00%
Total | 28 | 100.00% | 5 | 100.00%

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	rq_unlock(rq, &rf);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 36 | 75.00% | 6 | 54.55%
Paul Turner | 10 | 20.83% | 3 | 27.27%
Ingo Molnar | 2 | 4.17% | 2 | 18.18%
Total | 48 | 100.00% | 11 | 100.00%

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 68 | 64.76% | 9 | 42.86%
xiaofeng.yan | 20 | 19.05% | 1 | 4.76%
Ingo Molnar | 7 | 6.67% | 3 | 14.29%
Steven Rostedt | 3 | 2.86% | 1 | 4.76%
Andrew Morton | 1 | 0.95% | 1 | 4.76%
Paul Turner | 1 | 0.95% | 1 | 4.76%
Gregory Haskins | 1 | 0.95% | 1 | 4.76%
Frédéric Weisbecker | 1 | 0.95% | 1 | 4.76%
Tejun Heo | 1 | 0.95% | 1 | 4.76%
Paul Mackerras | 1 | 0.95% | 1 | 4.76%
Linus Torvalds | 1 | 0.95% | 1 | 4.76%
Total | 105 | 100.00% | 21 | 100.00%

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Wanpeng Li | 12 | 30.00% | 1 | 10.00%
Peter Zijlstra | 11 | 27.50% | 2 | 20.00%
Tejun Heo | 8 | 20.00% | 1 | 10.00%
Alexey Dobriyan | 3 | 7.50% | 1 | 10.00%
Ingo Molnar | 2 | 5.00% | 1 | 10.00%
Mathieu Desnoyers | 1 | 2.50% | 1 | 10.00%
Thomas Gleixner | 1 | 2.50% | 1 | 10.00%
Rusty Russell | 1 | 2.50% | 1 | 10.00%
Linus Torvalds | 1 | 2.50% | 1 | 10.00%
Total | 40 | 100.00% | 10 | 100.00%

#endif /* CONFIG_SMP */
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 31 | 46.97% | 2 | 28.57%
Tejun Heo | 21 | 31.82% | 1 | 14.29%
Gregory Haskins | 8 | 12.12% | 1 | 14.29%
Linus Torvalds | 3 | 4.55% | 1 | 14.29%
Alexey Dobriyan | 2 | 3.03% | 1 | 14.29%
Rusty Russell | 1 | 1.52% | 1 | 14.29%
Total | 66 | 100.00% | 7 | 100.00%

#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 11 | 100.00% | 2 | 100.00%
Total | 11 | 100.00% | 2 | 100.00%


static inline void init_rq_hrtick(struct rq *rq) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 11 | 100.00% | 2 | 100.00%
Total | 11 | 100.00% | 2 | 100.00%

#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);

	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 37 | 100.00% | 1 | 100.00%
Total | 37 | 100.00% | 1 | 100.00%
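
set_nr_and_not_polling() relies on fetch_or() returning the value the flags word held before the OR, so a single atomic operation both sets TIF_NEED_RESCHED and reports whether TIF_POLLING_NRFLAG was already set. A userspace sketch of the same semantics, with GCC's __sync_val_compare_and_swap() standing in for the kernel's cmpxchg() and illustrative flag values:

/* Standalone sketch of fetch_or() semantics (userspace stand-in, not kernel code). */
#include <stdio.h>

#define cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)

#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})

int main(void)
{
	unsigned long flags = 0x4;			/* pretend the "polling" bit is set */
	unsigned long old = fetch_or(&flags, 0x1UL);	/* pretend this is "need resched" */

	/* old holds the value *before* the OR; flags now has both bits: 0x4 and 0x5 */
	printf("old=0x%lx new=0x%lx\n", old, flags);
	return 0;
}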

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 93 | 98.94% | 1 | 50.00%
Jason Low | 1 | 1.06% | 1 | 50.00%
Total | 94 | 100.00% | 2 | 100.00%

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 19 | 100.00% | 1 | 100.00%
Total | 19 | 100.00% | 1 | 100.00%

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p) { return false; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 14 | 100.00% | 1 | 100.00%
Total | 14 | 100.00% | 1 | 100.00%

#endif
#endif
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * its already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_q().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 62 | 98.41% | 1 | 50.00%
Davidlohr Bueso A | 1 | 1.59% | 1 | 50.00%
Total | 63 | 100.00% | 2 | 100.00%


void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 75 | 98.68% | 1 | 50.00%
Ingo Molnar | 1 | 1.32% | 1 | 50.00%
Total | 76 | 100.00% | 2 | 100.00%
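
A sketch of the pattern wake_q_add()/wake_up_q() are designed for, assuming the in-kernel context: wakeups are queued while a lock is held and only issued after it is dropped, so woken tasks never contend on that lock. The caller, lock, list and waiter_struct type here are hypothetical:

/* Hypothetical waiter type used only for this sketch. */
struct waiter_struct {
	struct list_head list;
	struct task_struct *task;
};

static void example_wake_waiters(spinlock_t *lock, struct list_head *waiters)
{
	DEFINE_WAKE_Q(wake_q);
	struct waiter_struct *w;

	spin_lock(lock);
	list_for_each_entry(w, waiters, list)
		wake_q_add(&wake_q, w->task);	/* cheap: only queues, no wakeup yet */
	spin_unlock(lock);

	wake_up_q(&wake_q);			/* actual wakeups, without the lock held */
}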

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 37 | 45.68% | 5 | 45.45%
Kirill V Tkhai | 18 | 22.22% | 1 | 9.09%
Ingo Molnar | 12 | 14.81% | 2 | 18.18%
Andrew Lutomirski | 6 | 7.41% | 1 | 9.09%
Gregory Haskins | 5 | 6.17% | 1 | 9.09%
Rusty Russell | 3 | 3.70% | 1 | 9.09%
Total | 81 | 100.00% | 11 | 100.00%


void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 42 | 89.36% | 4 | 57.14%
Kirill V Tkhai | 2 | 4.26% | 1 | 14.29%
Paul E. McKenney | 2 | 4.26% | 1 | 14.29%
Alexey Dobriyan | 1 | 2.13% | 1 | 14.29%
Total | 47 | 100.00% | 7 | 100.00%

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id();
	struct sched_domain *sd;

	if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
		cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
unlock:
	rcu_read_unlock();
	return cpu;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 56 | 49.56% | 7 | 46.67%
Vatika Harlalka | 18 | 15.93% | 1 | 6.67%
Frédéric Weisbecker | 12 | 10.62% | 2 | 13.33%
Viresh Kumar | 11 | 9.73% | 1 | 6.67%
Wanpeng Li | 8 | 7.08% | 1 | 6.67%
Suresh B. Siddha | 5 | 4.42% | 2 | 13.33%
Thomas Gleixner | 3 | 2.65% | 1 | 6.67%
Total | 113 | 100.00% | 15 | 100.00%

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 34 | 72.34% | 5 | 55.56%
Andrew Lutomirski | 7 | 14.89% | 2 | 22.22%
Alexey Dobriyan | 5 | 10.64% | 1 | 11.11%
Frédéric Weisbecker | 1 | 2.13% | 1 | 11.11%
Total | 47 | 100.00% | 9 | 100.00%


static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Frédéric Weisbecker | 40 | 78.43% | 3 | 75.00%
Paul E. McKenney | 11 | 21.57% | 1 | 25.00%
Total | 51 | 100.00% | 4 | 100.00%

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Frédéric Weisbecker | 21 | 100.00% | 2 | 100.00%
Total | 21 | 100.00% | 2 | 100.00%


static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Vincent Guittot | 29 | 49.15% | 1 | 16.67%
Peter Zijlstra | 23 | 38.98% | 4 | 66.67%
Suresh B. Siddha | 7 | 11.86% | 1 | 16.67%
Total | 59 | 100.00% | 6 | 100.00%

#else /* CONFIG_NO_HZ_COMMON */
static inline bool got_nohz_idle_kick(void) { return false; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 12 | 100.00% | 1 | 100.00%
Total | 12 | 100.00% | 1 | 100.00%

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there are more than one RR tasks, we need the tick to effect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
	 * if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Frédéric Weisbecker | 53 | 58.89% | 2 | 50.00%
Peter Zijlstra | 24 | 26.67% | 1 | 25.00%
Rik Van Riel | 13 | 14.44% | 1 | 25.00%
Total | 90 | 100.00% | 4 | 100.00%

#endif /* CONFIG_NO_HZ_FULL */
void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 32 | 64.00% | 2 | 20.00%
Andrew Morton | 5 | 10.00% | 1 | 10.00%
Frédéric Weisbecker | 3 | 6.00% | 1 | 10.00%
Mike Galbraith | 3 | 6.00% | 1 | 10.00%
Gregory Haskins | 2 | 4.00% | 1 | 10.00%
Ingo Molnar | 2 | 4.00% | 2 | 20.00%
Alexey Dobriyan | 2 | 4.00% | 1 | 10.00%
Tejun Heo | 1 | 2.00% | 1 | 10.00%
Total | 50 | 100.00% | 10 | 100.00%
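
The loop above ages rq->rt_avg by halving it once for every elapsed period, so time recorded via sched_rt_avg_update() decays geometrically instead of accumulating forever. A userspace sketch of that decay with purely illustrative values (period = 1,000,000 ns here):

/* Userspace sketch of the halving-per-period decay done by sched_avg_update();
 * all numbers are illustrative, not kernel defaults. */
#include <stdio.h>

int main(void)
{
	unsigned long long rt_avg = 800000;	/* accumulated RT/IRQ time, ns */
	long long clock = 3500000, age_stamp = 0, period = 1000000;

	while (clock - age_stamp > period) {	/* three full periods elapsed */
		age_stamp += period;
		rt_avg /= 2;
	}

	/* 800000 -> 400000 -> 200000 -> 100000 */
	printf("rt_avg=%llu age_stamp=%lld\n", rt_avg, age_stamp);
	return 0;
}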

#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 89 | 73.55% | 5 | 62.50%
Tejun Heo | 16 | 13.22% | 1 | 12.50%
Andrew Morton | 13 | 10.74% | 1 | 12.50%
Gregory Haskins | 3 | 2.48% | 1 | 12.50%
Total | 121 | 100.00% | 8 | 100.00%


int tg_nop(struct task_group *tg, void *data) { return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 11 | 64.71% | 1 | 33.33%
Peter Williams | 4 | 23.53% | 1 | 33.33%
Gregory Haskins | 2 | 11.76% | 1 | 33.33%
Total | 17 | 100.00% | 3 | 100.00%
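
A sketch of how walk_tg_tree_from() is typically invoked, assuming the in-kernel context where root_task_group is visible; the my_down() visitor and example_walk() wrapper are hypothetical, and tg_nop() above serves as the no-op "up" callback:

/* Hypothetical visitor matching the tg_visitor signature. */
static int my_down(struct task_group *tg, void *data)
{
	/* a non-zero return here would abort the walk and be propagated */
	return 0;
}

static void example_walk(void *data)
{
	rcu_read_lock();	/* the walk requires RCU (or equivalent) protection */
	walk_tg_tree_from(&root_task_group, my_down, tg_nop, data);
	rcu_read_unlock();
}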

#endif
static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (idle_policy(p->policy)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Vincent Guittot | 27 | 25.00% | 1 | 7.69%
Ingo Molnar | 25 | 23.15% | 2 | 15.38%
Nikhil P Rao | 22 | 20.37% | 2 | 15.38%
Peter Zijlstra | 21 | 19.44% | 3 | 23.08%
Gregory Haskins | 4 | 3.70% | 1 | 7.69%
Henrik Austad | 3 | 2.78% | 1 | 7.69%
Steven Rostedt | 3 | 2.78% | 1 | 7.69%
Andi Kleen | 2 | 1.85% | 1 | 7.69%
Tejun Heo | 1 | 0.93% | 1 | 7.69%
Total | 108 | 100.00% | 13 | 100.00%
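
The weights looked up above determine the relative CPU share of competing CFS tasks: the array index is static_prio - MAX_RT_PRIO (i.e. 20 + nice), and each nice level changes the weight by roughly 1.25x. A standalone worked example using the default weights for nice 0 and nice +5 (1024 and 335, from the kernel's sched_prio_to_weight[] table):

/* Worked example: two always-runnable CFS tasks split the CPU in proportion
 * to their weights; the weights are the kernel defaults for nice 0 and +5. */
#include <stdio.h>

int main(void)
{
	unsigned int w_nice0 = 1024;	/* nice  0 -> index 20 in sched_prio_to_weight[] */
	unsigned int w_nice5 = 335;	/* nice +5 -> index 25 */

	double share0 = (double)w_nice0 / (w_nice0 + w_nice5);

	/* ~75.4% for the nice 0 task, ~24.6% for the nice +5 task */
	printf("nice 0 gets %.1f%% of the CPU\n", 100.0 * share0);
	return 0;
}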


static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE))
		sched_info_queued(rq, p);

	p->sched_class->enqueue_task(rq, p, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 30 | 47.62% | 5 | 38.46%
Alexey Dobriyan | 13 | 20.63% | 1 | 7.69%
Ingo Molnar | 10 | 15.87% | 1 | 7.69%
Steven Rostedt | 4 | 6.35% | 2 | 15.38%
Mike Galbraith | 3 | 4.76% | 2 | 15.38%
Michael S. Tsirkin | 2 | 3.17% | 1 | 7.69%
Tejun Heo | 1 | 1.59% | 1 | 7.69%
Total | 63 | 100.00% | 13 | 100.00%


static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeued(rq, p);

	p->sched_class->dequeue_task(rq, p, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 46 | 73.02% | 8 | 53.33%
Ingo Molnar | 9 | 14.29% | 2 | 13.33%
Michael S. Tsirkin | 2 | 3.17% | 1 | 6.67%
Tejun Heo | 2 | 3.17% | 1 | 6.67%
Mike Galbraith | 2 | 3.17% | 1 | 6.67%
Ankita Garg | 1 | 1.59% | 1 | 6.67%
Andrew Morton | 1 | 1.59% | 1 | 6.67%
Total | 63 | 100.00% | 15 | 100.00%


void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 39 | 100.00% | 3 | 100.00%
Total | 39 | 100.00% | 3 | 100.00%


void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Peter Zijlstra | 28 | 71.79% | 3 | 60.00%
Mike Galbraith | 10 | 25.64% | 1 | 20.00%
Andrew Morton | 1 | 2.56% | 1 | 20.00%
Total | 39 | 100.00% | 5 | 100.00%

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p) { return p->static_prio; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
Frédéric Weisbecker | 9 | 52.94% | 1 | 25.00%
Peter Williams | 4 | 23.53% | 1 | 25.00%
Venkatesh Pallipadi | 3 | 17.65% | 1 | 25.00%
Peter Zijlstra | 1 | 5.88% | 1 | 25.00%
Total | 17 | 100.00% | 4 | 100.00%

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Frédéric Weisbecker | 25 | 43.86% | 1 | 9.09%
Dario Faggioli | 14 | 24.56% | 1 | 9.09%
Mike Galbraith | 7 | 12.28% | 3 | 27.27%
Andrew Morton | 5 | 8.77% | 1 | 9.09%
Ingo Molnar | 3 | 5.26% | 2 | 18.18%
Lennart Poettering | 1 | 1.75% | 1 | 9.09%
Nicholas Piggin | 1 | 1.75% | 1 | 9.09%
Peter Williams | 1 | 1.75% | 1 | 9.09%
Total | 57 | 100.00% | 11 | 100.00%
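
The three branches above partition the priority space: SCHED_DEADLINE tasks get MAX_DL_PRIO-1 (i.e. -1, below every RT priority), SCHED_FIFO/RR tasks map rt_priority 1..99 onto prio 98..0, and everything else keeps its static_prio in the 100..139 range. A standalone sketch with the kernel's constant values (MAX_DL_PRIO = 0, MAX_RT_PRIO = 100):

/* Standalone sketch of the priority ranges computed by normal_prio();
 * the #defines mirror the kernel's constants. */
#include <stdio.h>

#define MAX_DL_PRIO	0
#define MAX_RT_PRIO	100

int main(void)
{
	/* SCHED_DEADLINE: always below every RT priority */
	printf("deadline task           -> prio %d\n", MAX_DL_PRIO - 1);	/* -1 */

	/* SCHED_FIFO/RR: rt_priority 1..99 maps to prio 98..0 */
	printf("RT task, rt_priority 50 -> prio %d\n", MAX_RT_PRIO - 1 - 50);	/* 49 */

	/* SCHED_NORMAL: prio == static_prio, 100..139 (nice -20..19) */
	printf("CFS task, nice 0        -> prio %d\n", MAX_RT_PRIO + 20);	/* 120 */
	return 0;
}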

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Frédéric Weisbecker | 32 | 78.05% | 1 | 16.67%
Peter Zijlstra | 3 | 7.32% | 1 | 16.67%
Mike Galbraith | 2 | 4.88% | 1 | 16.67%
Hiroshi Shimamoto | 2 | 4.88% | 1 | 16.67%
Ingo Molnar | 1 | 2.44% | 1 | 16.67%
Gregory Haskins | 1 | 2.44% | 1 | 16.67%
Total | 41 | 100.00% | 6 | 100.00%

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct