Release 4.15 kernel/time/tick-sched.c
/*
* linux/kernel/time/tick-sched.c
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
*
* No idle tick implementation for low and high resolution timers
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* Distribute under GPLv2.
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <asm/irq_regs.h>
#include "tick-internal.h"
#include <trace/events/timer.h>
/*
* Per-CPU nohz control structure
*/
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
struct tick_sched *tick_get_tick_sched(int cpu)
{
return &per_cpu(tick_cpu_sched, cpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
* The time, when the last jiffy update happened. Protected by jiffies_lock.
*/
static ktime_t last_jiffies_update;
/*
* Must be called with interrupts disabled !
*/
static void tick_do_update_jiffies64(ktime_t now)
{
unsigned long ticks = 0;
ktime_t delta;
/*
* Do a quick check without holding jiffies_lock:
*/
delta = ktime_sub(now, last_jiffies_update);
if (delta < tick_period)
return;
/* Reevaluate with jiffies_lock held */
write_seqlock(&jiffies_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta >= tick_period) {
delta = ktime_sub(delta, tick_period);
last_jiffies_update = ktime_add(last_jiffies_update,
tick_period);
/* Slow path for long timeouts */
if (unlikely(delta >= tick_period)) {
s64 incr = ktime_to_ns(tick_period);
ticks = ktime_divns(delta, incr);
last_jiffies_update = ktime_add_ns(last_jiffies_update,
incr * ticks);
}
do_timer(++ticks);
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
write_sequnlock(&jiffies_lock);
return;
}
write_sequnlock(&jiffies_lock);
update_wall_time();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 119 | 78.29% | 2 | 28.57% |
Ingo Molnar | 16 | 10.53% | 1 | 14.29% |
Viresh Kumar | 10 | 6.58% | 1 | 14.29% |
John Stultz | 6 | 3.95% | 2 | 28.57% |
Wei Jiangang | 1 | 0.66% | 1 | 14.29% |
Total | 152 | 100.00% | 7 | 100.00% |
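For intuition, the catch-up arithmetic above can be modeled in a few lines of userspace C. This is a hedged sketch with hypothetical names and an assumed HZ of 1000, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define TICK_PERIOD_NS 1000000LL	/* assumed HZ=1000 */

static int64_t last_update_ns;		/* models last_jiffies_update */
static uint64_t jiffies64;		/* models the jiffies counter */

static void update_jiffies64(int64_t now_ns)
{
	int64_t delta = now_ns - last_update_ns;
	uint64_t ticks = 0;

	if (delta < TICK_PERIOD_NS)
		return;				/* quick check: nothing due yet */

	/* Consume one period unconditionally ... */
	delta -= TICK_PERIOD_NS;
	last_update_ns += TICK_PERIOD_NS;

	/* ... and fold a long idle stretch with a single division,
	 * mirroring the unlikely() slow path above. */
	if (delta >= TICK_PERIOD_NS) {
		ticks = delta / TICK_PERIOD_NS;
		last_update_ns += ticks * TICK_PERIOD_NS;
	}
	jiffies64 += ticks + 1;			/* do_timer(++ticks) */
}

int main(void)
{
	update_jiffies64(5 * TICK_PERIOD_NS + 123);
	printf("jiffies64 = %llu\n", (unsigned long long)jiffies64);	/* 5 */
	return 0;
}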
/*
* Initialize and return the jiffies update.
*/
static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
write_seqlock(&jiffies_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
write_sequnlock(&jiffies_lock);
return period;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 39 | 95.12% | 1 | 50.00% |
John Stultz | 2 | 4.88% | 1 | 50.00% |
Total | 41 | 100.00% | 2 | 100.00% |
static void tick_sched_do_timer(ktime_t now)
{
int cpu = smp_processor_id();
#ifdef CONFIG_NO_HZ_COMMON
/*
* Check if the do_timer duty was dropped. We don't care about
* concurrency: This happens only when the CPU in charge went
* into a long sleep. If two CPUs happen to assign themselves to
* this duty, then the jiffies update is still serialized by
* jiffies_lock.
*/
if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
&& !tick_nohz_full_cpu(cpu))
tick_do_timer_cpu = cpu;
#endif
/* Check, if the jiffies need an update */
if (tick_do_timer_cpu == cpu)
tick_do_update_jiffies64(now);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 51 | 98.08% | 4 | 80.00% |
Ingo Molnar | 1 | 1.92% | 1 | 20.00% |
Total | 52 | 100.00% | 5 | 100.00% |
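To make the handover above concrete, here is a minimal userspace model of the duty assignment. The names and the printf are hypothetical; the real code additionally serializes the jiffies update with jiffies_lock:

#include <stdio.h>

#define TICK_DO_TIMER_NONE	(-1)

static int tick_do_timer_cpu = TICK_DO_TIMER_NONE;

static void sched_do_timer(int cpu, int is_nohz_full)
{
	/* Duty was dropped: the first non-nohz_full CPU to tick adopts it */
	if (tick_do_timer_cpu == TICK_DO_TIMER_NONE && !is_nohz_full)
		tick_do_timer_cpu = cpu;

	if (tick_do_timer_cpu == cpu)
		printf("cpu%d updates jiffies\n", cpu);
}

int main(void)
{
	sched_do_timer(2, 1);	/* nohz_full CPU: must not take the duty */
	sched_do_timer(0, 0);	/* housekeeping CPU adopts it */
	sched_do_timer(2, 1);	/* duty now owned by cpu0: nothing to do */
	return 0;
}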
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
/*
* When we are idle and the tick is stopped, we have to touch
* the watchdog as we might not schedule for a really long
* time. This happens on complete idle SMP systems while
* waiting on the login prompt. We also increment the "start of
* idle" jiffy stamp so the idle accounting adjustment we do
* when we go busy again does not account too many ticks.
*/
if (ts->tick_stopped) {
touch_softlockup_watchdog_sched();
if (is_idle_task(current))
ts->idle_jiffies++;
/*
* In case the current tick fired too early past its expected
* expiration, make sure we don't bypass the next clock reprogramming
* to the same deadline.
*/
ts->next_tick = 0;
}
#endif
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 64 | 98.46% | 4 | 80.00% |
Tejun Heo | 1 | 1.54% | 1 | 20.00% |
Total | 65 | 100.00% | 5 | 100.00% |
#endif
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;
static bool check_tick_dependency(atomic_t *dep)
{
int val = atomic_read(dep);
if (val & TICK_DEP_MASK_POSIX_TIMER) {
trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
return true;
}
if (val & TICK_DEP_MASK_PERF_EVENTS) {
trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
return true;
}
if (val & TICK_DEP_MASK_SCHED) {
trace_tick_stop(0, TICK_DEP_MASK_SCHED);
return true;
}
if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 93 | 100.00% | 3 | 100.00% |
Total | 93 | 100.00% | 3 | 100.00% |
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
lockdep_assert_irqs_disabled();
if (unlikely(!cpu_online(cpu)))
return false;
if (check_tick_dependency(&tick_dep_mask))
return false;
if (check_tick_dependency(&ts->tick_dep_mask))
return false;
if (check_tick_dependency(&current->tick_dep_mask))
return false;
if (check_tick_dependency(&current->signal->tick_dep_mask))
return false;
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 69 | 80.23% | 4 | 80.00% |
Wanpeng Li | 17 | 19.77% | 1 | 20.00% |
Total | 86 | 100.00% | 5 | 100.00% |
static void nohz_full_kick_func(struct irq_work *work)
{
/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 12 | 100.00% | 3 | 100.00% |
Total | 12 | 100.00% | 3 | 100.00% |
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_func,
};
/*
* Kick this CPU if it's full dynticks in order to force it to
* re-evaluate its dependency on the tick and restart it if necessary.
* This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
* is NMI safe.
*/
static void tick_nohz_full_kick(void)
{
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 25 | 92.59% | 2 | 66.67% |
Christoph Lameter | 2 | 7.41% | 1 | 33.33% |
Total | 27 | 100.00% | 3 | 100.00% |
/*
* Kick the CPU if it's full dynticks in order to force it to
* re-evaluate its dependency on the tick and restart it if necessary.
*/
void tick_nohz_full_kick_cpu(int cpu)
{
if (!tick_nohz_full_cpu(cpu))
return;
irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 30 | 100.00% | 2 | 100.00% |
Total | 30 | 100.00% | 2 | 100.00% |
/*
* Kick all full dynticks CPUs in order to force these to re-evaluate
* their dependency on the tick and restart it if necessary.
*/
static void tick_nohz_full_kick_all(void)
{
int cpu;
if (!tick_nohz_full_running)
return;
preempt_disable();
for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
tick_nohz_full_kick_cpu(cpu);
preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 36 | 100.00% | 5 | 100.00% |
Total | 36 | 100.00% | 5 | 100.00% |
static void tick_nohz_dep_set_all(atomic_t *dep,
enum tick_dep_bits bit)
{
int prev;
prev = atomic_fetch_or(BIT(bit), dep);
if (!prev)
tick_nohz_full_kick_all();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 35 | 94.59% | 2 | 66.67% |
Peter Zijlstra | 2 | 5.41% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
/*
* Set a global tick dependency. Used by perf events that rely on freq and
* by unstable clock.
*/
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
tick_nohz_dep_set_all(&tick_dep_mask, bit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &tick_dep_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 20 | 100.00% | 2 | 100.00% |
Total | 20 | 100.00% | 2 | 100.00% |
/*
* Set per-CPU tick dependency. Used by scheduler and perf events in order to
* manage events throttling.
*/
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
int prev;
struct tick_sched *ts;
ts = per_cpu_ptr(&tick_cpu_sched, cpu);
prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
if (!prev) {
preempt_disable();
/* Perf needs local kick that is NMI safe */
if (cpu == smp_processor_id()) {
tick_nohz_full_kick();
} else {
/* Remote irq work not NMI-safe */
if (!WARN_ON_ONCE(in_nmi()))
tick_nohz_full_kick_cpu(cpu);
}
preempt_enable();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 84 | 94.38% | 2 | 66.67% |
Peter Zijlstra | 5 | 5.62% | 1 | 33.33% |
Total | 89 | 100.00% | 3 | 100.00% |
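The atomic_fetch_or() above implements a kick-once pattern: only the 0 -> nonzero transition of the mask sends a kick. A hedged userspace sketch using C11 atomics in place of the kernel's atomic_t (all names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define BIT(nr)	(1U << (nr))

static atomic_uint dep_mask;

static void kick(void)
{
	puts("kick: target re-evaluates its tick dependency");
}

static void dep_set(unsigned int bit)
{
	/* fetch_or returns the old mask: only the first setter kicks */
	if (atomic_fetch_or(&dep_mask, BIT(bit)) == 0)
		kick();
}

static void dep_clear(unsigned int bit)
{
	atomic_fetch_and(&dep_mask, ~BIT(bit));
}

int main(void)
{
	dep_set(0);	/* 0 -> 1: kicks */
	dep_set(1);	/* mask already nonzero: silent */
	dep_clear(0);
	dep_clear(1);
	dep_set(2);	/* mask was drained back to 0: kicks again */
	return 0;
}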
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 38 | 100.00% | 2 | 100.00% |
Total | 38 | 100.00% | 2 | 100.00% |
/*
* Set a per-task tick dependency. Posix CPU timers need this in order to elapse
* per task timers.
*/
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
/*
* We could optimize this with just kicking the target running the task
* if that noise matters for nohz full users.
*/
tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 27 | 100.00% | 2 | 100.00% |
Total | 27 | 100.00% | 2 | 100.00% |
/*
* Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
* per process timers.
*/
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 27 | 100.00% | 2 | 100.00% |
Total | 27 | 100.00% | 2 | 100.00% |
/*
* Re-evaluate the need for the tick as we switch the current task.
* It might need the tick due to per task/process properties:
* perf events, posix CPU timers, ...
*/
void __tick_nohz_task_switch(void)
{
unsigned long flags;
struct tick_sched *ts;
local_irq_save(flags);
if (!tick_nohz_full_cpu(smp_processor_id()))
goto out;
ts = this_cpu_ptr(&tick_cpu_sched);
if (ts->tick_stopped) {
if (atomic_read(&current->tick_dep_mask) ||
atomic_read(&current->signal->tick_dep_mask))
tick_nohz_full_kick();
}
out:
local_irq_restore(flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 69 | 87.34% | 5 | 83.33% |
Li Zhong | 10 | 12.66% | 1 | 16.67% |
Total | 79 | 100.00% | 6 | 100.00% |
/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
cpumask_copy(tick_nohz_full_mask, cpumask);
tick_nohz_full_running = true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 26 | 100.00% | 5 | 100.00% |
Total | 26 | 100.00% | 5 | 100.00% |
static int tick_nohz_cpu_down(unsigned int cpu)
{
/*
* The boot CPU handles housekeeping duty (unbound timers,
* workqueues, timekeeping, ...) on behalf of full dynticks
* CPUs. It must remain online when nohz full is enabled.
*/
if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
return -EBUSY;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 20 | 76.92% | 2 | 66.67% |
Sebastian Andrzej Siewior | 6 | 23.08% | 1 | 33.33% |
Total | 26 | 100.00% | 3 | 100.00% |
static int tick_nohz_init_all(void)
{
int err = -1;
#ifdef CONFIG_NO_HZ_FULL_ALL
if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
return err;
}
err = 0;
cpumask_setall(tick_nohz_full_mask);
tick_nohz_full_running = true;
#endif
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 50 | 86.21% | 4 | 80.00% |
Paul E. McKenney | 8 | 13.79% | 1 | 20.00% |
Total | 58 | 100.00% | 5 | 100.00% |
void __init tick_nohz_init(void)
{
int cpu, ret;
if (!tick_nohz_full_running) {
if (tick_nohz_init_all() < 0)
return;
}
/*
* Full dynticks uses irq work to drive the tick rescheduling from safe
* locking contexts. But then we need irq work to raise its own
* interrupts to avoid a circular dependency on the tick.
*/
if (!arch_irq_work_has_interrupt()) {
pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
cpumask_clear(tick_nohz_full_mask);
tick_nohz_full_running = false;
return;
}
cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
cpu);
cpumask_clear_cpu(cpu, tick_nohz_full_mask);
}
for_each_cpu(cpu, tick_nohz_full_mask)
context_tracking_cpu_set(cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"kernel/nohz:predown", NULL,
tick_nohz_cpu_down);
WARN_ON(ret < 0);
pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
cpumask_pr_args(tick_nohz_full_mask));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 100 | 81.30% | 10 | 76.92% |
Sebastian Andrzej Siewior | 17 | 13.82% | 1 | 7.69% |
Joe Perches | 3 | 2.44% | 1 | 7.69% |
Tejun Heo | 3 | 2.44% | 1 | 7.69% |
Total | 123 | 100.00% | 13 | 100.00% |
#endif
/*
* NOHZ - aka dynamic tick functionality
*/
#ifdef CONFIG_NO_HZ_COMMON
/*
* NO HZ enabled ?
*/
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
* Enable / Disable tickless mode
*/
static int __init setup_tick_nohz(char *str)
{
return (kstrtobool(str, &tick_nohz_enabled) == 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 18 | 75.00% | 1 | 50.00% |
Kees Cook | 6 | 25.00% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
__setup("nohz=", setup_tick_nohz);
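For reference, setup_tick_nohz() is wired to the kernel command line; per Documentation/admin-guide/kernel-parameters.txt, the switch looks like this (kstrtobool() also accepts 1/0 and y/n spellings):

	nohz=on		enable dyntick-idle (tickless) mode, the default shown above
	nohz=off	keep the periodic tick on idle CPUs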
int tick_nohz_tick_stopped(void)
{
return __this_cpu_read(tick_cpu_sched.tick_stopped);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
/**
* tick_nohz_update_jiffies - update jiffies when idle was interrupted
*
* Called from interrupt entry when the CPU was idle
*
* In case the sched_tick was stopped on this CPU, we have to check if jiffies
* must be updated. Otherwise an interrupt handler could use a stale jiffy
* value. We do this unconditionally on any CPU, as we don't know whether the
* CPU which has the update task assigned is in a long sleep.
*/
static void tick_nohz_update_jiffies(ktime_t now)
{
unsigned long flags;
__this_cpu_write(tick_cpu_sched.idle_waketime, now);
local_irq_save(flags);
tick_do_update_jiffies64(now);
local_irq_restore(flags);
touch_softlockup_watchdog_sched();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 28 | 70.00% | 2 | 28.57% |
Frédéric Weisbecker | 6 | 15.00% | 1 | 14.29% |
Ingo Molnar | 2 | 5.00% | 1 | 14.29% |
Martin Schwidefsky | 2 | 5.00% | 1 | 14.29% |
Tejun Heo | 1 | 2.50% | 1 | 14.29% |
Jaswinder Singh Rajput | 1 | 2.50% | 1 | 14.29% |
Total | 40 | 100.00% | 7 | 100.00% |
/*
* Updates the per-CPU time idle statistics counters
*/
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
ktime_t delta;
if (ts->idle_active) {
delta = ktime_sub(now, ts->idle_entrytime);
if (nr_iowait_cpu(cpu) > 0)
ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
else
ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
ts->idle_entrytime = now;
}
if (last_update_time)
*last_update_time = ktime_to_us(now);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arjan van de Ven | 55 | 56.70% | 4 | 44.44% |
Venkatesh Pallipadi | 18 | 18.56% | 1 | 11.11% |
Michal Hocko | 14 | 14.43% | 1 | 11.11% |
Peter Zijlstra | 6 | 6.19% | 1 | 11.11% |
Martin Schwidefsky | 3 | 3.09% | 1 | 11.11% |
Thomas Gleixner | 1 | 1.03% | 1 | 11.11% |
Total | 97 | 100.00% | 9 | 100.00% |
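A hedged userspace model of the attribution rule above: an elapsed idle interval is charged to iowait when tasks are blocked on I/O for this CPU, and to plain idle otherwise (all names and values hypothetical):

#include <stdint.h>
#include <stdio.h>

struct ts_model {
	int idle_active;
	int64_t idle_entrytime;
	int64_t idle_sleeptime;
	int64_t iowait_sleeptime;
};

static void update_stats(struct ts_model *ts, int64_t now, int nr_iowait)
{
	if (ts->idle_active) {
		int64_t delta = now - ts->idle_entrytime;

		if (nr_iowait > 0)
			ts->iowait_sleeptime += delta;
		else
			ts->idle_sleeptime += delta;
		ts->idle_entrytime = now;	/* restart the interval */
	}
}

int main(void)
{
	struct ts_model ts = { .idle_active = 1, .idle_entrytime = 1000 };

	update_stats(&ts, 4000, 0);	/* 3000 ns of plain idle */
	update_stats(&ts, 9000, 2);	/* 5000 ns attributed to iowait */
	printf("idle=%lld iowait=%lld\n",
	       (long long)ts.idle_sleeptime, (long long)ts.iowait_sleeptime);
	return 0;
}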
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
update_ts_time_stats(smp_processor_id(), ts, now, NULL);
ts->idle_active = 0;
sched_clock_idle_wakeup_event();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arjan van de Ven | 18 | 51.43% | 2 | 28.57% |
Venkatesh Pallipadi | 7 | 20.00% | 1 | 14.29% |
Frédéric Weisbecker | 6 | 17.14% | 1 | 14.29% |
Peter Zijlstra | 4 | 11.43% | 3 | 42.86% |
Total | 35 | 100.00% | 7 | 100.00% |
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
ktime_t now = ktime_get();
ts->idle_entrytime = now;
ts->idle_active = 1;
sched_clock_idle_sleep_event();
return now;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Venkatesh Pallipadi | 28 | 80.00% | 1 | 25.00% |
Peter Zijlstra | 3 | 8.57% | 1 | 25.00% |
Karsten Wiese | 2 | 5.71% | 1 | 25.00% |
Michal Hocko | 2 | 5.71% | 1 | 25.00% |
Total | 35 | 100.00% | 4 | 100.00% |
/**
* get_cpu_idle_time_us - get the total idle time of a CPU
* @cpu: CPU number to query
* @last_update_time: variable to store update time in. Do not update
* counters if NULL.
*
* Return the cumulative idle time (since boot) for a given
* CPU, in microseconds.
*
* This time is measured via accounting rather than sampling,
* and is as accurate as ktime_get() is.
*
* This function returns -1 if NOHZ is not enabled.
*/
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t now, idle;
if (!tick_nohz_active)
return -1;
now = ktime_get();
if (last_update_time) {
update_ts_time_stats(cpu, ts, now, last_update_time);
idle = ts->idle_sleeptime;
} else {
if (ts->idle_active && !nr_iowait_cpu(cpu)) {
ktime_t delta = ktime_sub(now, ts->idle_entrytime);
idle = ktime_add(ts->idle_sleeptime, delta);
} else {
idle = ts->idle_sleeptime;
}
}
return ktime_to_us(idle);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 72 | 59.02% | 1 | 14.29% |
Venkatesh Pallipadi | 42 | 34.43% | 2 | 28.57% |
Arjan van de Ven | 5 | 4.10% | 2 | 28.57% |
Peter Zijlstra | 2 | 1.64% | 1 | 14.29% |
Thomas Gleixner | 1 | 0.82% | 1 | 14.29% |
Total | 122 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
/**
* get_cpu_iowait_time_us - get the total iowait time of a CPU
* @cpu: CPU number to query
* @last_update_time: variable to store update time in. Do not update
* counters if NULL.
*
* Return the cumulative iowait time (since boot) for a given
* CPU, in microseconds.
*
* This time is measured via accounting rather than sampling,
* and is as accurate as ktime_get() is.
*
* This function returns -1 if NOHZ is not enabled.
*/
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t now, iowait;
if (!tick_nohz_active)
return -1;
now = ktime_get();
if (last_update_time) {
update_ts_time_stats(cpu, ts, now, last_update_time);
iowait = ts->iowait_sleeptime;
} else {
if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
ktime_t delta = ktime_sub(now, ts->idle_entrytime);
iowait = ktime_add(ts->iowait_sleeptime, delta);
} else {
iowait = ts->iowait_sleeptime;
}
}
return ktime_to_us(iowait);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 69 | 56.10% | 1 | 25.00% |
Arjan van de Ven | 51 | 41.46% | 1 | 25.00% |
Peter Zijlstra | 2 | 1.63% | 1 | 25.00% |
Thomas Gleixner | 1 | 0.81% | 1 | 25.00% |
Total | 123 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
hrtimer_cancel(&ts->sched_timer);
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
/* Forward the time to expire in the future */
hrtimer_forward(&ts->sched_timer, now, tick_period);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
else
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
/*
* Reset to make sure next tick stop doesn't get fooled by past
* cached clock deadline.
*/
ts->next_tick = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 79 | 91.86% | 1 | 50.00% |
Frédéric Weisbecker | 7 | 8.14% | 1 | 50.00% |
Total | 86 | 100.00% | 2 | 100.00% |
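tick_nohz_restart() leans on hrtimer_forward() to push the saved last_tick expiry into the future. A hedged userspace model of that forwarding arithmetic (simplified; the real helper operates on ktime_t inside the hrtimer core):

#include <stdint.h>
#include <stdio.h>

static uint64_t forward(int64_t *expires, int64_t now, int64_t period)
{
	uint64_t overruns;

	if (*expires > now)
		return 0;			/* already in the future */

	/* Advance by whole periods until the expiry passes 'now' */
	overruns = (uint64_t)(now - *expires) / period + 1;
	*expires += (int64_t)overruns * period;
	return overruns;
}

int main(void)
{
	int64_t expires = 1000;
	uint64_t n = forward(&expires, 4500, 1000);

	printf("overruns=%llu new expiry=%lld\n",
	       (unsigned long long)n, (long long)expires);	/* 4, 5000 */
	return 0;
}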
static inline bool local_timer_softirq_pending(void)
{
return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
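The BIT() in the helper above matters: softirq numbers are small integers indexing a pending bitmap, so masking with the raw enum value tests the wrong bit. A short userspace demonstration (softirq numbers as in include/linux/interrupt.h, everything else hypothetical):

#include <stdio.h>

#define BIT(nr)		(1U << (nr))
#define HI_SOFTIRQ	0
#define TIMER_SOFTIRQ	1

int main(void)
{
	unsigned int pending = BIT(TIMER_SOFTIRQ);	/* only the timer softirq */

	printf("pending & TIMER_SOFTIRQ      = %u (wrong: looks idle)\n",
	       pending & TIMER_SOFTIRQ);
	printf("pending & BIT(TIMER_SOFTIRQ) = %u (correct: timer pending)\n",
	       pending & BIT(TIMER_SOFTIRQ));
	return 0;
}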
static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
ktime_t now, int cpu)
{
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
unsigned long seq, basejiff;
ktime_t tick;
/* Read jiffies and the time when jiffies were updated last */
do {
seq = read_seqbegin(&jiffies_lock);
basemono = last_jiffies_update;
basejiff = jiffies;
} while (read_seqretry(&jiffies_lock, seq));
ts->last_jiffies = basejiff;
/*
* Keep the periodic tick when RCU, the architecture or irq_work
* requests it.
* Apart from that, check whether the local timer softirq is
* pending. If so, it's a bad idea to call get_next_timer_interrupt(),
* because there is an already expired timer, so it will request
* immediate expiry, which rearms the hardware timer with a
* minimal delta that brings us right back to this place
* immediately. Lather, rinse and repeat...
*/
if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
irq_work_needs_cpu() || local_timer_softirq_pending()) {
next_tick = basemono + TICK_NSEC;
} else {
/*
* Get the next pending timer. If high resolution
* timers are enabled this only takes the timer wheel
* timers into account. If high resolution timers are
* disabled this also looks at the next expiring
* hrtimer.
*/
next_tmr = get_next_timer_interrupt(basejiff, basemono);
ts->next_timer = next_tmr;
/* Take the next rcu event into account */
next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
}
/*
* If the tick is due in the next period, keep it ticking or
* force prod the timer.
*/
delta = next_tick - basemono;
if (delta <= (u64)TICK_NSEC) {
/*
* Tell the timer code that the base is not idle, i.e. undo
* the effect of get_next_timer_interrupt():
*/
timer_clear_idle();
/*
* We've not stopped the tick yet, and there's a timer in the
* next period, so no point in stopping it either, bail.
*/
if (!ts->tick_stopped) {
tick = 0;
goto out;
}
}
/*
* If this CPU is the one which updates jiffies, then give up
* the assignment and let it be taken by the CPU which runs
* the tick timer next, which might be this CPU as well. If we
* don't drop this here the jiffies might be stale and
* do_timer() never invoked. Keep track of the fact that it
* was the one which had the do_timer() duty last. If this CPU
* is the one which had the do_timer() duty last, we limit the
* sleep time to the timekeeping max_deferment value.
* Otherwise we can sleep as long as we want.
*/
delta = timekeeping_max_deferment();
if (cpu == tick_do_timer_cpu) {
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
ts->do_timer_last = 1;
} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
delta = KTIME_MAX;
ts->do_timer_last = 0;
} else if (!ts->do_timer_last) {
delta = KTIME_MAX;
}
#ifdef CONFIG_NO_HZ_FULL
/* Limit the tick delta to the maximum scheduler deferment */
if (!ts->inidle)
delta = min(delta, scheduler_tick_max_deferment());
#endif
/* Calculate the next expiry time */
if (delta < (KTIME_MAX - basemono))
expires = basemono + delta;
else
expires = KTIME_MAX;
expires = min_t(u64, expires, next_tick);
tick = expires;
/* Skip reprogram of event if its not changed */
if (ts->tick_stopped && (expires == ts->next_tick)) {
/* Sanity check: make sure clockevent is actually programmed */
if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
goto out;
WARN_ON_ONCE(1);
printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
basemono, ts->next_tick, dev->next_event,
hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
}
/*
* tick_nohz_stop_sched_tick() can be called several times before
* tick_nohz_restart_sched_tick() is called. This happens when
* interrupts arrive that do not cause a reschedule. In the
* first call we save the current tick time, so we can restart
* the scheduler tick in tick_nohz_restart_sched_tick().
*/
if (!ts->tick_stopped) {
calc_load_nohz_start();
cpu_load_update_nohz_start();
quiet_vmstat();
ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
trace_tick_stop(1, TICK_DEP_MASK_NONE);
}
ts->next_tick = tick;
/*
* If the expiration time == KTIME_MAX, then we simply stop
* the tick timer.
*/
if (unlikely(expires == KTIME_MAX)) {
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_cancel(&ts->sched_timer);
goto out;
}
hrtimer_set_expires(&ts->sched_timer, tick);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
else
tick_program_event(tick, 1);
out:
/*
* Update the estimated sleep length until the next timer
* (not only the tick).
*/
ts->sleep_length = ktime_sub(dev->next_event, now);
return tick;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 280 | 54.58% | 8 | 20.51% |
Frédéric Weisbecker | 130 | 25.34% | 15 | 38.46% |
Len Brown | 23 | 4.48% | 1 | 2.56% |
Richard Woodruff | 18 | 3.51% | 1 | 2.56% |
Jon Hunter | 17 | 3.31% | 1 | 2.56% |
Martin Schwidefsky | 15 | 2.92% | 2 | 5.13% |
Ingo Molnar | 8 | 1.56% | 2 | 5.13% |
Peter Zijlstra | 8 | 1.56% | 4 | 10.26% |
Paul E. McKenney | 5 | 0.97% | 1 | 2.56% |
Arjan van de Ven | 4 | 0.78% | 1 | 2.56% |
John Stultz | 2 | 0.39% | 1 | 2.56% |
Christoph Lameter | 2 | 0.39% | 1 | 2.56% |
Arun R Bharadwaj | 1 | 0.19% | 1 | 2.56% |
Total | 513 | 100.00% | 39 | 100.00% |
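The expiry chosen by tick_nohz_stop_sched_tick() is effectively min(base + max_deferment, next_tick), clamped against overflow. A hedged userspace model of that clamping (all values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

static uint64_t next_expiry(uint64_t basemono, uint64_t next_tick,
			    uint64_t max_deferment)
{
	uint64_t expires;

	/* Guard the addition against overflowing KTIME_MAX */
	if (max_deferment < (uint64_t)KTIME_MAX - basemono)
		expires = basemono + max_deferment;
	else
		expires = KTIME_MAX;

	/* Never sleep past the next timer/RCU event */
	return expires < next_tick ? expires : next_tick;
}

int main(void)
{
	/* next timer in 10 ms, timekeeping allows 500 ms deferment */
	printf("%llu\n", (unsigned long long)
	       next_expiry(0, 10000000ULL, 500000000ULL));	/* 10000000 */
	return 0;
}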
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
/* Update jiffies first */
tick_do_update_jiffies64(now);
cpu_load_update_nohz_stop();
/*
* Clear the timer idle flag, so we avoid IPIs on remote queueing and
* the clock forward checks in the enqueue path:
*/
timer_clear_idle();
calc_load_nohz_stop();
touch_softlockup_watchdog_sched();
/*
* Cancel the scheduled timer and restore the tick
*/
ts->tick_stopped = 0;
ts->idle_exittime = now;
tick_nohz_restart(ts, now);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 48 | 90.57% | 4 | 66.67% |
Thomas Gleixner | 4 | 7.55% | 1 | 16.67% |
Tejun Heo | 1 | 1.89% | 1 | 16.67% |
Total | 53 | 100.00% | 6 | 100.00% |
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
int cpu = smp_processor_id();
if (!tick_nohz_full_cpu(cpu))
return;
if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
return;
if (can_stop_full_tick(cpu, ts))
tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
else if (ts->tick_stopped)
tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 77 | 97.47% | 3 | 75.00% |
Wanpeng Li | 2 | 2.53% | 1 | 25.00% |
Total | 79 | 100.00% | 4 | 100.00% |
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
/*
* If this CPU is offline and it is the one which updates
* jiffies, then give up the assignment and let it be taken by
* the CPU which runs the tick timer next. If we don't drop
* this here the jiffies might be stale and do_timer() never
* invoked.
*/
if (unlikely(!cpu_online(cpu))) {
if (cpu == tick_do_timer_cpu)
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
/*
* Make sure the CPU doesn't get fooled by obsolete tick
* deadline if it comes back online later.
*/
ts->next_tick = 0;
return false;
}
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
ts->sleep_length = NSEC_PER_SEC / HZ;
return false;
}
if (need_resched())
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
static int ratelimit;
if (ratelimit < 10 &&
(local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
pr_warn("NOHZ: local_softirq_pending %02x\n",
(unsigned int) local_softirq_pending());
ratelimit++;
}
return false;
}
if (tick_nohz_full_enabled()) {
/*
* Keep the tick alive to guarantee timekeeping progression
* if there are full dynticks CPUs around
*/
if (tick_do_timer_cpu == cpu)
return false;
/*
* Boot safety: make sure the timekeeping duty has been
* assigned before entering dyntick-idle mode.
*/
if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
return false;
}
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 140 | 86.42% | 4 | 44.44% |
Thomas Gleixner | 13 | 8.02% | 2 | 22.22% |
Paul E. McKenney | 7 | 4.32% | 1 | 11.11% |
Rado Vrbovsky | 1 | 0.62% | 1 | 11.11% |
Ingo Molnar | 1 | 0.62% | 1 | 11.11% |
Total | 162 | 100.00% | 9 | 100.00% |
static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
ktime_t now, expires;
int cpu = smp_processor_id();
now = tick_nohz_start_idle(ts);
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
ts->idle_calls++;
expires = tick_nohz_stop_sched_tick(ts, now, cpu);
if (expires > 0LL) {
ts->idle_sleeps++;
ts->idle_expires = expires;
}
if (!was_stopped && ts->tick_stopped) {
ts->idle_jiffies = ts->last_jiffies;
nohz_balance_enter_idle(cpu);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 81 | 76.42% | 6 | 66.67% |
Thomas Gleixner | 13 | 12.26% | 1 | 11.11% |
Wanpeng Li | 7 | 6.60% | 1 | 11.11% |
Len Brown | 5 | 4.72% | 1 | 11.11% |
Total | 106 | 100.00% | 9 | 100.00% |
/**
* tick_nohz_idle_enter - stop the idle tick from the idle task
*
* When the next event is more than a tick into the future, stop the idle tick.
* Called when we start the idle loop.
*
* The arch is responsible for calling:
*
* - rcu_idle_enter() after its last use of RCU before the CPU is put
* to sleep.
* - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
*/
void tick_nohz_idle_enter(void)
{
struct tick_sched *ts;
lockdep_assert_irqs_enabled();
/*
* Update the idle state in the scheduler domain hierarchy
* when tick_nohz_stop_sched_tick() is called from the idle loop.
* State will be updated to busy during the first busy tick after
* exiting idle.
*/
set_cpu_sd_state_idle();
local_irq_disable();
ts = this_cpu_ptr(&tick_cpu_sched);
ts->inidle = 1;
__tick_nohz_idle_enter(ts);
local_irq_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 37 | 84.09% | 4 | 50.00% |
Linus Torvalds | 3 | 6.82% | 1 | 12.50% |
Christoph Lameter | 2 | 4.55% | 1 | 12.50% |
Ingo Molnar | 1 | 2.27% | 1 | 12.50% |
Paul E. McKenney | 1 | 2.27% | 1 | 12.50% |
Total | 44 | 100.00% | 8 | 100.00% |
/**
* tick_nohz_irq_exit - update next tick event from interrupt exit
*
* When an interrupt fires while we are idle and it doesn't cause
* a reschedule, it may still add, modify or delete a timer, enqueue
* an RCU callback, etc...
* So we need to re-calculate and reprogram the next tick event.
*/
void tick_nohz_irq_exit(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (ts->inidle)
__tick_nohz_idle_enter(ts);
else
tick_nohz_full_update_tick(ts);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 30 | 85.71% | 4 | 66.67% |
Thomas Gleixner | 3 | 8.57% | 1 | 16.67% |
Christoph Lameter | 2 | 5.71% | 1 | 16.67% |
Total | 35 | 100.00% | 6 | 100.00% |
/**
* tick_nohz_get_sleep_length - return the length of the current sleep
*
* Called from power state control code with interrupts disabled
*/
ktime_t tick_nohz_get_sleep_length(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
return ts->sleep_length;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Len Brown | 21 | 91.30% | 1 | 50.00% |
Christoph Lameter | 2 | 8.70% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
/**
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
* for a particular CPU.
*
* Called from the schedutil frequency scaling governor in scheduler context.
*/
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
struct tick_sched *ts = tick_get_tick_sched(cpu);
return ts->idle_calls;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Joel Fernandes | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
/**
* tick_nohz_get_idle_calls - return the current idle calls counter value
*
* Called from the schedutil frequency scaling governor in scheduler context.
*/
unsigned long tick_nohz_get_idle_calls(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
return ts->idle_calls;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
unsigned long ticks;
if (vtime_accounting_cpu_enabled())
return;
/*
* We stopped the tick in idle. update_process_times() would miss the
* time we slept, as it does only one tick of accounting.
* Enforce that this is accounted to idle!
*/
ticks = jiffies - ts->idle_jiffies;
/*
* We might be one off. Do not randomly account a huge number of ticks!
*/
if (ticks && ticks < LONG_MAX)
account_idle_ticks(ticks);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 49 | 100.00% | 4 | 100.00% |
Total | 49 | 100.00% | 4 | 100.00% |
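A hedged userspace model of the backfill above: the wakeup tick accounts only itself, so the ticks skipped while the tick was stopped are credited to idle in one batch (values hypothetical):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 100045;		/* now, in ticks */
	unsigned long idle_jiffies = 100000;	/* stamped when the tick stopped */
	unsigned long ticks = jiffies - idle_jiffies;

	/* We might be one off: never account a huge bogus number of ticks */
	if (ticks && ticks < LONG_MAX)
		printf("account_idle_ticks(%lu)\n", ticks);
	return 0;
}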
/**
* tick_nohz_idle_exit - restart the idle tick from the idle task
*
* Restart the idle tick when the CPU is woken up from idle.
* This also exits the RCU extended quiescent state. The CPU
* can use RCU again after this function is called.
*/
void tick_nohz_idle_exit(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now;
local_irq_disable();
WARN_ON_ONCE(!ts->inidle);
ts->inidle = 0;
if (ts->idle_active || ts->tick_stopped)
now = ktime_get();
if (ts->idle_active)
tick_nohz_stop_idle(ts, now);
if (ts->tick_stopped) {
tick_nohz_restart_sched_tick(ts, now);
tick_nohz_account_idle_ticks(ts);
}
local_irq_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 37 | 41.57% | 2 | 20.00% |
Martin Schwidefsky | 23 | 25.84% | 1 | 10.00% |
Frédéric Weisbecker | 19 | 21.35% | 5 | 50.00% |
Venkatesh Pallipadi | 8 | 8.99% | 1 | 10.00% |
Christoph Lameter | 2 | 2.25% | 1 | 10.00% |
Total | 89 | 100.00% | 10 | 100.00% |
/*
* The nohz low res interrupt handler
*/
static void tick_nohz_handler(struct clock_event_device *dev)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
dev->next_event = KTIME_MAX;
tick_sched_do_timer(now);
tick_sched_handle(ts, regs);
/* No need to reprogram if we are running tickless */
if (unlikely(ts->tick_stopped))
return;
hrtimer_forward(&ts->sched_timer, now, tick_period);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 73 | 81.11% | 2 | 33.33% |
Viresh Kumar | 11 | 12.22% | 1 | 16.67% |
Frédéric Weisbecker | 4 | 4.44% | 2 | 33.33% |
Christoph Lameter | 2 | 2.22% | 1 | 16.67% |
Total | 90 | 100.00% | 6 | 100.00% |
static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
if (!tick_nohz_enabled)
return;
ts->nohz_mode = mode;
/* One update is enough */
if (!test_and_set_bit(0, &tick_nohz_active))
timers_update_migration(true);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 44 | 100.00% | 2 | 100.00% |
Total | 44 | 100.00% | 2 | 100.00% |
/**
* tick_nohz_switch_to_nohz - switch to nohz mode
*/
static void tick_nohz_switch_to_nohz(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t next;
if (!tick_nohz_enabled)
return;
if (tick_switch_to_oneshot(tick_nohz_handler))
return;
/*
* Recycle the hrtimer in ts, so we can share the
* hrtimer_forward with the highres code.
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
/* Get the next period */
next = tick_init_jiffy_update();
hrtimer_set_expires(&ts->sched_timer, next);
hrtimer_forward_now(&ts->sched_timer, tick_period);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 77 | 81.05% | 3 | 42.86% |
Wanpeng Li | 11 | 11.58% | 1 | 14.29% |
Arjan van de Ven | 4 | 4.21% | 1 | 14.29% |
Christoph Lameter | 2 | 2.11% | 1 | 14.29% |
Viresh Kumar | 1 | 1.05% | 1 | 14.29% |
Total | 95 | 100.00% | 7 | 100.00% |
static inline void tick_nohz_irq_enter(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now;
if (!ts->idle_active && !ts->tick_stopped)
return;
now = ktime_get();
if (ts->idle_active)
tick_nohz_stop_idle(ts, now);
if (ts->tick_stopped)
tick_nohz_update_jiffies(now);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 60 | 92.31% | 1 | 25.00% |
Frédéric Weisbecker | 3 | 4.62% | 2 | 50.00% |
Christoph Lameter | 2 | 3.08% | 1 | 25.00% |
Total | 65 | 100.00% | 4 | 100.00% |
#else
static inline void tick_nohz_switch_to_nohz(void) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 8 | 100.00% | 1 | 100.00% |
Total | 8 | 100.00% | 1 | 100.00% |
static inline void tick_nohz_irq_enter(void) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 6 | 75.00% | 1 | 33.33% |
Frédéric Weisbecker | 2 | 25.00% | 2 | 66.67% |
Total | 8 | 100.00% | 3 | 100.00% |
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_NO_HZ_COMMON */
/*
* Called from irq_enter to notify about the possible interruption of idle()
*/
void tick_irq_enter(void)
{
tick_check_oneshot_broadcast_this_cpu();
tick_nohz_irq_enter();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 7 | 53.85% | 2 | 50.00% |
Frédéric Weisbecker | 6 | 46.15% | 2 | 50.00% |
Total | 13 | 100.00% | 4 | 100.00% |
/*
* High resolution timer specific code
*/
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* We rearm the timer until we get disabled by the idle code.
* Called with interrupts disabled.
*/
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
struct tick_sched *ts =
container_of(timer, struct tick_sched, sched_timer);
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
tick_sched_do_timer(now);
/*
* Do not call when we are not in irq context and have
* no valid regs pointer.
*/
if (regs)
tick_sched_handle(ts, regs);
else
ts->next_tick = 0;
/* No need to reprogram if we are in idle or full dynticks mode */
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;
hrtimer_forward(timer, now, tick_period);
return HRTIMER_RESTART;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 66 | 73.33% | 1 | 16.67% |
Viresh Kumar | 13 | 14.44% | 1 | 16.67% |
Frédéric Weisbecker | 11 | 12.22% | 4 | 66.67% |
Total | 90 | 100.00% | 6 | 100.00% |
static int sched_skew_tick;
static int __init skew_tick(char *str)
{
get_option(&str, &sched_skew_tick);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
early_param("skew_tick", skew_tick);
/**
* tick_setup_sched_timer - setup the tick emulation timer
*/
void tick_setup_sched_timer(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now = ktime_get();
/*
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per-CPU) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
u64 offset = ktime_to_ns(tick_period) >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}
hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 77 | 61.11% | 3 | 37.50% |
Mike Galbraith | 39 | 30.95% | 1 | 12.50% |
Arjan van de Ven | 6 | 4.76% | 1 | 12.50% |
Christoph Lameter | 2 | 1.59% | 1 | 12.50% |
Arun R Bharadwaj | 1 | 0.79% | 1 | 12.50% |
Ingo Molnar | 1 | 0.79% | 1 | 12.50% |
Total | 126 | 100.00% | 8 | 100.00% |
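The skew_tick offset above spreads the per-CPU tick timers across half a tick period so they do not all contend on jiffies_lock at once. A hedged userspace model of the offset computation (assumed HZ=250 and 8 possible CPUs; the kernel uses do_div() instead of plain division):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250
#define NR_CPUS 8

int main(void)
{
	uint64_t tick_period_ns = NSEC_PER_SEC / HZ;
	uint64_t step = (tick_period_ns >> 1) / NR_CPUS;	/* half period / CPUs */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: first tick offset %llu ns\n",
		       cpu, (unsigned long long)(step * cpu));
	return 0;
}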
#endif /* HIGH_RES_TIMERS */
#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
# ifdef CONFIG_HIGH_RES_TIMERS
if (ts->sched_timer.base)
hrtimer_cancel(&ts->sched_timer);
# endif
memset(ts, 0, sizeof(*ts));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 50 | 90.91% | 2 | 66.67% |
Miao Xie | 5 | 9.09% | 1 | 33.33% |
Total | 55 | 100.00% | 3 | 100.00% |
#endif
/**
* Async notification about clocksource changes
*/
void tick_clock_notify(void)
{
int cpu;
for_each_possible_cpu(cpu)
set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/*
* Async notification about clock event changes
*/
void tick_oneshot_notify(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
set_bit(0, &ts->check_clocks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 26 | 92.86% | 1 | 50.00% |
Christoph Lameter | 2 | 7.14% | 1 | 50.00% |
Total | 28 | 100.00% | 2 | 100.00% |
/**
* Check, if a change happened, which makes oneshot possible.
*
* Called cyclically from the hrtimer softirq (driven by the timer
* softirq). The allow_nohz argument signals that we can switch into
* low-res nohz mode, because high resolution timers are disabled
* (either at compile time or at runtime). Called with interrupts
* disabled.
*/
int tick_check_oneshot_change(int allow_nohz)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (!test_and_clear_bit(0, &ts->check_clocks))
return 0;
if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
return 0;
if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
return 0;
if (!allow_nohz)
return 1;
tick_nohz_switch_to_nohz();
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 70 | 95.89% | 1 | 33.33% |
Christoph Lameter | 2 | 2.74% | 1 | 33.33% |
Li Zefan | 1 | 1.37% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Frédéric Weisbecker | 1655 | 41.40% | 58 | 38.41% |
Thomas Gleixner | 1312 | 32.82% | 22 | 14.57% |
Michal Hocko | 157 | 3.93% | 3 | 1.99% |
Arjan van de Ven | 148 | 3.70% | 5 | 3.31% |
Venkatesh Pallipadi | 111 | 2.78% | 2 | 1.32% |
Martin Schwidefsky | 109 | 2.73% | 3 | 1.99% |
Ingo Molnar | 68 | 1.70% | 10 | 6.62% |
Len Brown | 50 | 1.25% | 1 | 0.66% |
Mike Galbraith | 43 | 1.08% | 1 | 0.66% |
Wanpeng Li | 37 | 0.93% | 3 | 1.99% |
Viresh Kumar | 35 | 0.88% | 4 | 2.65% |
Peter Zijlstra | 35 | 0.88% | 8 | 5.30% |
Rafael J. Wysocki | 25 | 0.63% | 1 | 0.66% |
Joel Fernandes | 25 | 0.63% | 1 | 0.66% |
Christoph Lameter | 24 | 0.60% | 3 | 1.99% |
Sebastian Andrzej Siewior | 23 | 0.58% | 1 | 0.66% |
Paul E. McKenney | 21 | 0.53% | 4 | 2.65% |
Richard Woodruff | 18 | 0.45% | 1 | 0.66% |
Arnd Bergmann | 18 | 0.45% | 1 | 0.66% |
Jon Hunter | 17 | 0.43% | 1 | 0.66% |
Miao Xie | 14 | 0.35% | 1 | 0.66% |
Li Zhong | 10 | 0.25% | 1 | 0.66% |
John Stultz | 10 | 0.25% | 2 | 1.32% |
Kees Cook | 8 | 0.20% | 1 | 0.66% |
Tejun Heo | 6 | 0.15% | 2 | 1.32% |
Linus Torvalds | 3 | 0.08% | 1 | 0.66% |
David S. Miller | 3 | 0.08% | 1 | 0.66% |
Joe Perches | 3 | 0.08% | 1 | 0.66% |
Arun R Bharadwaj | 2 | 0.05% | 1 | 0.66% |
Karsten Wiese | 2 | 0.05% | 1 | 0.66% |
Rado Vrbovsky | 1 | 0.03% | 1 | 0.66% |
Chuansheng Liu | 1 | 0.03% | 1 | 0.66% |
Wei Jiangang | 1 | 0.03% | 1 | 0.66% |
Li Zefan | 1 | 0.03% | 1 | 0.66% |
Jaswinder Singh Rajput | 1 | 0.03% | 1 | 0.66% |
Pavel Machek | 1 | 0.03% | 1 | 0.66% |
Total | 3998 | 100.00% | 151 | 100.00% |