Release 4.15 kernel/time/timekeeping.c
/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>
#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"
#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)
/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Prarit Bhargava | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
static struct clocksource dummy_clock = {
	.read = dummy_clock_read,
};

static struct tk_fast tk_fast_mono ____cacheline_aligned = {
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};

static struct tk_fast tk_fast_raw ____cacheline_aligned = {
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};
/* Flag indicating whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
		tk->raw_sec++;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 94 | 92.16% | 2 | 50.00% |
Thomas Gleixner | 4 | 3.92% | 1 | 25.00% |
Peter Zijlstra | 4 | 3.92% | 1 | 25.00% |
Total | 102 | 100.00% | 4 | 100.00% |
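
The xtime_nsec fields hold nanoseconds left-shifted by the clocksource shift, so sub-nanosecond precision survives repeated accumulation; tk_normalize_xtime() carries whole seconds out of that fixed-point accumulator. A minimal userspace sketch of the same carry logic (the shift and sample values below are hypothetical, not taken from any real clocksource):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Carry whole seconds out of a shifted-nanosecond accumulator,
 * mirroring tk_normalize_xtime(): xtime_nsec holds ns << shift. */
static void normalize(uint64_t *xtime_nsec, uint64_t *xtime_sec, int shift)
{
	while (*xtime_nsec >= (NSEC_PER_SEC << shift)) {
		*xtime_nsec -= NSEC_PER_SEC << shift;
		(*xtime_sec)++;
	}
}

int main(void)
{
	int shift = 8;					/* hypothetical clocksource shift */
	uint64_t sec = 0;
	uint64_t nsec = 2500000000ULL << shift;		/* 2.5 s, in shifted ns */

	normalize(&nsec, &sec, shift);
	printf("%llu s + %llu ns\n",			/* prints: 2 s + 500000000 ns */
	       (unsigned long long)sec,
	       (unsigned long long)(nsec >> shift));
	return 0;
}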
static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 47 | 95.92% | 2 | 66.67% |
Peter Zijlstra | 2 | 4.08% | 1 | 33.33% |
Total | 49 | 100.00% | 3 | 100.00% |
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 40 | 90.91% | 3 | 60.00% |
Thomas Gleixner | 2 | 4.55% | 1 | 20.00% |
Peter Zijlstra | 2 | 4.55% | 1 | 20.00% |
Total | 44 | 100.00% | 5 | 100.00% |
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 45 | 91.84% | 4 | 66.67% |
Peter Zijlstra | 2 | 4.08% | 1 | 16.67% |
Thomas Gleixner | 2 | 4.08% | 1 | 16.67% |
Total | 49 | 100.00% | 6 | 100.00% |
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 103 | 100.00% | 4 | 100.00% |
Total | 103 | 100.00% | 4 | 100.00% |
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 20 | 71.43% | 1 | 50.00% |
Thomas Gleixner | 8 | 28.57% | 1 | 50.00% |
Total | 28 | 100.00% | 2 | 100.00% |
/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * This helper is necessary in the read paths because, while the
 * seqlock ensures we don't return a bad value while structures are
 * updated, it doesn't protect against potential crashes. There is the
 * possibility that the tkr's clocksource may change between the read
 * reference and the clock reference passed to the read function.
 * This can cause crashes if the wrong clocksource is passed to the
 * wrong read function.
 * This isn't necessary when holding the timekeeper_lock or doing
 * a read of the fast-timekeeper tkrs (which is protected by its own
 * locking and update logic).
 */
static inline u64 tk_clock_read(struct tk_read_base *tkr)
{
	struct clocksource *clock = READ_ONCE(tkr->clock);

	return clock->read(clock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
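
The crash described above comes from dereferencing tkr->clock twice: without READ_ONCE() the compiler may reload the pointer between fetching the read function and building the clock argument, and a concurrent clocksource change can then mix old and new. A userspace sketch of the pattern (hypothetical demo types; READ_ONCE() approximated with a volatile access):

#include <stdint.h>

struct clocksource {
	uint64_t (*read)(struct clocksource *cs);
};

struct tk_read_base_demo {
	struct clocksource *clock;
};

/* Unsafe: tkr->clock is evaluated twice. If an update swaps the
 * clocksource between the two loads, the new clock's read() may be
 * handed a pointer to the old clock (or vice versa). */
static uint64_t read_racy(struct tk_read_base_demo *tkr)
{
	return tkr->clock->read(tkr->clock);
}

/* Safe: snapshot the pointer exactly once, then pass that same
 * snapshot to the read function, as tk_clock_read() does. */
static uint64_t read_once(struct tk_read_base_demo *tkr)
{
	struct clocksource *clock =
		*(struct clocksource * volatile *)&tkr->clock;

	return clock->read(clock);
}

static uint64_t dummy_read(struct clocksource *cs)
{
	(void)cs;
	return 42;
}

int main(void)
{
	struct clocksource cs = { .read = dummy_read };
	struct tk_read_base_demo tkr = { .clock = &cs };

	return (read_racy(&tkr) + read_once(&tkr) == 84) ? 0 : 1;
}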
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{

	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 188 | 97.41% | 4 | 57.14% |
Thomas Gleixner | 2 | 1.04% | 1 | 14.29% |
Peter Zijlstra | 2 | 1.04% | 1 | 14.29% |
Masanari Iida | 1 | 0.52% | 1 | 14.29% |
Total | 193 | 100.00% | 7 | 100.00% |
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(tkr);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 160 | 98.77% | 5 | 83.33% |
Thomas Gleixner | 2 | 1.23% | 1 | 16.67% |
Total | 162 | 100.00% | 6 | 100.00% |
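
The underflow test relies on the masked subtraction: when now sits slightly behind cycle_last, (now - last) & mask lands just under the mask, so the complement ~delta & mask is tiny. A standalone demonstration with a hypothetical 32-bit counter:

#include <stdint.h>
#include <stdio.h>

/* The same masked subtraction clocksource_delta() performs for a
 * counter narrower than 64 bits. */
static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;
}

int main(void)
{
	uint64_t mask = (1ULL << 32) - 1;	/* hypothetical 32-bit counter */
	uint64_t last = 1000;
	uint64_t now  = 990;			/* read "before" cycle_last */

	uint64_t delta = clocksource_delta(now, last, mask);

	/* A small negative delta wraps to just under the mask, so its
	 * complement relative to the mask is tiny. The debug code treats
	 * anything within mask/8 of the wrap point as an underflow. */
	if ((~delta & mask) < (mask >> 3))
		printf("underflow: delta=%#llx treated as 0\n",
		       (unsigned long long)delta);
	return 0;
}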
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 13 | 92.86% | 1 | 50.00% |
Thomas Gleixner | 1 | 7.14% | 1 | 50.00% |
Total | 14 | 100.00% | 2 | 100.00% |
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	u64 cycle_now, delta;

	/* read clocksource */
	cycle_now = tk_clock_read(tkr);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 42 | 95.45% | 2 | 66.67% |
Thomas Gleixner | 2 | 4.55% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
#endif
/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = interval * clock->mult;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;

		if (shift_change < 0) {
			tk->tkr_mono.xtime_nsec >>= -shift_change;
			tk->tkr_raw.xtime_nsec >>= -shift_change;
		} else {
			tk->tkr_mono.xtime_nsec <<= shift_change;
			tk->tkr_raw.xtime_nsec <<= shift_change;
		}
	}

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 113 | 34.66% | 3 | 17.65% |
John Stultz | 107 | 32.82% | 6 | 35.29% |
Peter Zijlstra | 65 | 19.94% | 2 | 11.76% |
Thomas Gleixner | 24 | 7.36% | 4 | 23.53% |
Kasper Pedersen | 12 | 3.68% | 1 | 5.88% |
Christopher S. Hall | 5 | 1.53% | 1 | 5.88% |
Total | 326 | 100.00% | 17 | 100.00% |
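
The interval math above converts the fixed NTP tick length from nanoseconds into clocksource cycles with rounding, then back into shifted nanoseconds; the difference becomes xtime_remainder, which feeds the NTP error accounting. A userspace rerun of that arithmetic with hypothetical mult/shift values (a clocksource where one cycle equals one nanosecond):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical clocksource: ns = (cycles * mult) >> shift */
	uint32_t mult  = 1U << 22;		/* with shift 22: 1 cycle == 1 ns */
	uint32_t shift = 22;
	uint64_t ntp_interval_length = 10000000ULL;	/* ns per NTP tick, HZ=100 */

	/* ns -> cycles with rounding, as tk_setup_internals() does */
	uint64_t tmp = ntp_interval_length << shift;
	uint64_t ntpinterval = tmp;
	tmp += mult / 2;
	tmp /= mult;				/* do_div() in the kernel */
	if (tmp == 0)
		tmp = 1;

	uint64_t cycle_interval = tmp;

	/* cycles -> shifted ns; the rounding loss becomes the remainder */
	uint64_t xtime_interval  = cycle_interval * mult;
	uint64_t xtime_remainder = ntpinterval - xtime_interval;

	printf("cycle_interval=%llu cycles, remainder=%llu shifted-ns\n",
	       (unsigned long long)cycle_interval,
	       (unsigned long long)xtime_remainder);	/* remainder 0 here */
	return 0;
}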
/* Timekeeper helper functions. */
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Warren | 11 | 91.67% | 1 | 50.00% |
Thomas Gleixner | 1 | 8.33% | 1 | 50.00% |
Total | 12 | 100.00% | 2 | 100.00% |
#endif
static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 14 | 32.56% | 6 | 50.00% |
John Stultz | 14 | 32.56% | 3 | 25.00% |
Christopher S. Hall | 7 | 16.28% | 1 | 8.33% |
Martin Schwidefsky | 7 | 16.28% | 1 | 8.33% |
Stephen Warren | 1 | 2.33% | 1 | 8.33% |
Total | 43 | 100.00% | 12 | 100.00% |
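
All readout paths funnel through this delta-to-nanoseconds step: scale the cycle delta by mult, fold in the shifted-nanosecond remainder, then shift down. A self-contained sketch with a hypothetical 1 MHz clocksource (arch_gettimeoffset() omitted):

#include <stdint.h>
#include <stdio.h>

/* Mirror of timekeeping_delta_to_ns(): scale raw cycles by mult and
 * add the shifted-nanosecond remainder before shifting down. */
static uint64_t delta_to_ns(uint64_t delta, uint32_t mult, uint32_t shift,
			    uint64_t xtime_nsec)
{
	return (delta * mult + xtime_nsec) >> shift;
}

int main(void)
{
	/* Hypothetical 1 MHz clocksource: mult/shift chosen so that
	 * (cycles * mult) >> shift == cycles * 1000 ns. */
	uint32_t mult = 1000 << 10, shift = 10;
	uint64_t xtime_nsec = 500ULL << 10;	/* 500 ns carried from last tick */

	printf("%llu ns\n",			/* 7 cycles -> 7500 ns */
	       (unsigned long long)delta_to_ns(7, mult, shift, xtime_nsec));
	return 0;
}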
static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	u64 delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 28 | 93.33% | 1 | 33.33% |
Thomas Gleixner | 2 | 6.67% | 2 | 66.67% |
Total | 30 | 100.00% | 3 | 100.00% |
static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
	u64 delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 39 | 92.86% | 1 | 33.33% |
Thomas Gleixner | 3 | 7.14% | 2 | 66.67% |
Total | 42 | 100.00% | 3 | 100.00% |
/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 60 | 82.19% | 1 | 33.33% |
Peter Zijlstra | 11 | 15.07% | 1 | 33.33% |
Rafael J. Wysocki | 2 | 2.74% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
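
A compact single-threaded model of the latch sequence used here: two copies of the data, with the low bit of the sequence steering readers to whichever copy is not being rewritten. The real raw_write_seqcount_latch() also issues memory barriers, which this sketch omits:

#include <stdint.h>
#include <stdio.h>

struct latch {
	unsigned int seq;
	uint64_t base[2];
};

static void latch_write(struct latch *l, uint64_t val)
{
	l->seq++;			/* odd: readers steered to base[1] */
	l->base[0] = val;		/* rewrite base[0] */
	l->seq++;			/* even: readers go back to base[0] */
	l->base[1] = l->base[0];	/* bring base[1] up to date */
}

static uint64_t latch_read(struct latch *l)
{
	unsigned int seq;
	uint64_t val;

	do {
		seq = l->seq;			/* raw_read_seqcount_latch() */
		val = l->base[seq & 0x01];	/* pick the stable copy */
	} while (seq != l->seq);		/* retry if a write raced us */

	return val;
}

int main(void)
{
	struct latch l = { 0, { 0, 0 } };

	latch_write(&l, 42);
	printf("read %llu\n", (unsigned long long)latch_read(&l));
	return 0;
}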
/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 61 | 62.89% | 1 | 14.29% |
John Stultz | 21 | 21.65% | 3 | 42.86% |
Peter Zijlstra | 15 | 15.46% | 3 | 42.86% |
Total | 97 | 100.00% | 7 | 100.00% |
u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqlocks. This has the following minor side effects:
 *
 * (1) It's possible that a timestamp is taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping, making the clock appear to update slightly
 * earlier:
 *    CPU 0                                        CPU 1
 *    timekeeping_inject_sleeptime64()
 *    __timekeeping_inject_sleeptime(tk, delta);
 *                                                 timestamp();
 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated. Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Joel Fernandes | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
/*
 * See comment for __ktime_get_fast_ns() vs. timestamp ordering
 */
static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base_real);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 97 | 100.00% | 1 | 100.00% |
Total | 97 | 100.00% | 1 | 100.00% |
/**
 * ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
 */
u64 ktime_get_real_fast_ns(void)
{
	return __ktime_get_real_fast_ns(&tk_fast_mono);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended. It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tk_clock_read(tkr);
	tkr_dummy.clock = &dummy_clock;
	tkr_dummy.base_real = tkr->base + tk->offs_real;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 54 | 49.09% | 1 | 16.67% |
Peter Zijlstra | 37 | 33.64% | 3 | 50.00% |
Thomas Gleixner | 12 | 10.91% | 1 | 16.67% |
John Stultz | 7 | 6.36% | 1 | 16.67% |
Total | 110 | 100.00% | 6 | 100.00% |
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcelo Tosatti | 20 | 83.33% | 1 | 50.00% |
David Vrabel | 4 | 16.67% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcelo Tosatti | 55 | 87.30% | 1 | 20.00% |
Thomas Gleixner | 6 | 9.52% | 3 | 60.00% |
David Vrabel | 2 | 3.17% | 1 | 20.00% |
Total | 63 | 100.00% | 5 | 100.00% |
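
A sketch of what a hypothetical listener module would do with this API: the callback runs under timekeeper_lock on every timekeeping_update(), with 'was_set' carrying the TK_CLOCK_WAS_SET state and the updated timekeeper as the payload. The demo_* names are illustrative, not a real in-tree user:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pvclock_gtod.h>

/* Hypothetical listener: a hypervisor client such as KVM would
 * recompute its cached copy of the clock parameters here. */
static int demo_gtod_notify(struct notifier_block *nb,
			    unsigned long was_set, void *priv)
{
	/* 'priv' points at the updated struct timekeeper */
	return NOTIFY_DONE;
}

static struct notifier_block demo_gtod_nb = {
	.notifier_call = demo_gtod_notify,
};

static int __init demo_init(void)
{
	/* Registration immediately replays the current state via
	 * update_pvclock_gtod(tk, true), as seen above. */
	return pvclock_gtod_register_notifier(&demo_gtod_nb);
}

static void __exit demo_exit(void)
{
	pvclock_gtod_unregister_notifier(&demo_gtod_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");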
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcelo Tosatti | 42 | 91.30% | 1 | 33.33% |
Thomas Gleixner | 4 | 8.70% | 2 | 66.67% |
Total | 46 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;

	/* Update the monotonic raw base */
	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Heena Sirwani | 47 | 41.59% | 1 | 16.67% |
Thomas Gleixner | 46 | 40.71% | 2 | 33.33% |
John Stultz | 16 | 14.16% | 2 | 33.33% |
Peter Zijlstra | 4 | 3.54% | 1 | 16.67% |
Total | 113 | 100.00% | 6 | 100.00% |
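
The base_mono identity in the comment above can be checked numerically. A small sketch with hypothetical snapshot values, including the carry into ktime_sec when the nanosecond parts cross one second:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	/* Hypothetical snapshot: wall time 1000.3 s, boot happened at
	 * wall time 900 s, so wall_to_monotonic is -900 s. */
	int64_t xtime_sec = 1000, xtime_nsec = 300000000;
	int64_t wtm_sec = -900, wtm_nsec = 0;

	/* base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec, so that
	 * mono_now = base_mono + nsec_since_last_update. */
	int64_t base_mono = (xtime_sec + wtm_sec) * NSEC_PER_SEC + wtm_nsec;

	/* ktime_sec must carry when the nanosecond parts sum past 1 s. */
	int64_t nsec = wtm_nsec + xtime_nsec;
	int64_t ktime_sec = xtime_sec + wtm_sec + (nsec >= NSEC_PER_SEC ? 1 : 0);

	printf("base_mono=%lld ns, ktime_sec=%lld\n",	/* 100000000000, 100 */
	       (long long)base_mono, (long long)ktime_sec);
	return 0;
}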
/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 58 | 44.62% | 5 | 31.25% |
John Stultz | 40 | 30.77% | 4 | 25.00% |
Peter Zijlstra | 15 | 11.54% | 3 | 18.75% |
David Vrabel | 10 | 7.69% | 2 | 12.50% |
Marcelo Tosatti | 5 | 3.85% | 1 | 6.25% |
Rafael J. Wysocki | 2 | 1.54% | 1 | 6.25% |
Total | 130 | 100.00% | 16 | 100.00% |
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	u64 cycle_now, delta;

	cycle_now = tk_clock_read(&tk->tkr_mono);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

	tk_normalize_xtime(tk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 70 | 53.03% | 7 | 31.82% |
Thomas Gleixner | 27 | 20.45% | 7 | 31.82% |
Peter Zijlstra | 22 | 16.67% | 2 | 9.09% |
Martin Schwidefsky | 6 | 4.55% | 3 | 13.64% |
Andreas Schwab | 3 | 2.27% | 1 | 4.55% |
Roman Zippel | 3 | 2.27% | 1 | 4.55% |
Stephen Warren | 1 | 0.76% | 1 | 4.55% |
Total | 132 | 100.00% | 22 | 100.00% |
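
Putting the pieces together, forwarding is accumulate-then-normalize: fold delta * mult into the shifted accumulator, then carry out whole seconds. A userspace rerun with hypothetical numbers (one cycle equals 250 ns):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Hypothetical clocksource: mult = 1000, shift = 2, so
	 * (cycles * 1000) >> 2 == cycles * 250 ns. */
	uint32_t mult = 1000, shift = 2;
	uint64_t cycle_last = 100, now = 4000100;	/* 4,000,000 new cycles */
	uint64_t xtime_sec = 0, xtime_nsec = 0;		/* ns << shift */

	/* timekeeping_forward_now(): accumulate delta * mult ... */
	xtime_nsec += (now - cycle_last) * mult;
	cycle_last = now;

	/* ... then carry whole seconds out, as tk_normalize_xtime() does */
	while (xtime_nsec >= (NSEC_PER_SEC << shift)) {
		xtime_nsec -= NSEC_PER_SEC << shift;
		xtime_sec++;
	}

	printf("forwarded by %llu s + %llu ns\n",	/* 1 s + 0 ns */
	       (unsigned long long)xtime_sec,
	       (unsigned long long)(xtime_nsec >> shift));
	return 0;
}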
/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns