Release 4.15 kernel/time/timekeeping.c
/*
* linux/kernel/time/timekeeping.c
*
* Kernel timekeeping code and accessor functions
*
* This code was moved from linux/kernel/timer.c.
* Please see that file for copyright and history logs.
*
*/
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>
#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"
#define TK_CLEAR_NTP (1 << 0)
#define TK_MIRROR (1 << 1)
#define TK_CLOCK_WAS_SET (1 << 2)
/*
* The most important data for readout fits into a single 64 byte
* cache line.
*/
static struct {
seqcount_t seq;
struct timekeeper timekeeper;
} tk_core ____cacheline_aligned;
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
/**
* struct tk_fast - NMI safe timekeeper
* @seq: Sequence counter for protecting updates. The lowest bit
* is the index for the tk_read_base array
* @base: tk_read_base array. Access is indexed by the lowest bit of
* @seq.
*
* See @update_fast_timekeeper() below.
*/
struct tk_fast {
seqcount_t seq;
struct tk_read_base base[2];
};
/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;
static u64 dummy_clock_read(struct clocksource *cs)
{
return cycles_at_suspend;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Prarit Bhargava | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
static struct clocksource dummy_clock = {
.read = dummy_clock_read,
};
static struct tk_fast tk_fast_mono ____cacheline_aligned = {
.base[0] = { .clock = &dummy_clock, },
.base[1] = { .clock = &dummy_clock, },
};
static struct tk_fast tk_fast_raw ____cacheline_aligned = {
.base[0] = { .clock = &dummy_clock, },
.base[1] = { .clock = &dummy_clock, },
};
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
tk->xtime_sec++;
}
while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
tk->raw_sec++;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 94 | 92.16% | 2 | 50.00% |
Thomas Gleixner | 4 | 3.92% | 1 | 25.00% |
Peter Zijlstra | 4 | 3.92% | 1 | 25.00% |
Total | 102 | 100.00% | 4 | 100.00% |
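Aside: tkr_mono.xtime_nsec holds nanoseconds left-shifted by the clocksource shift, so the fractional nanoseconds produced by the mult/shift conversion are not lost between updates. A minimal standalone sketch of the same normalization, using hypothetical values (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t shift = 8;                /* assumed clocksource shift */
    uint64_t xtime_sec = 100;
    /* 1.5 seconds, kept in shifted-nanosecond units */
    uint64_t xtime_nsec = 1500000000ULL << shift;

    /* the same loop as tk_normalize_xtime(): carry whole seconds out */
    while (xtime_nsec >= (1000000000ULL << shift)) {
        xtime_nsec -= 1000000000ULL << shift;
        xtime_sec++;
    }

    /* readout drops the fractional bits, as tk_xtime() does */
    printf("%llu.%09llu\n", (unsigned long long)xtime_sec,
           (unsigned long long)(xtime_nsec >> shift)); /* prints 101.500000000 */
    return 0;
}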
static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
struct timespec64 ts;
ts.tv_sec = tk->xtime_sec;
ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
return ts;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 47 | 95.92% | 2 | 66.67% |
Peter Zijlstra | 2 | 4.08% | 1 | 33.33% |
Total | 49 | 100.00% | 3 | 100.00% |
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
tk->xtime_sec = ts->tv_sec;
tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 40 | 90.91% | 3 | 60.00% |
Thomas Gleixner | 2 | 4.55% | 1 | 20.00% |
Peter Zijlstra | 2 | 4.55% | 1 | 20.00% |
Total | 44 | 100.00% | 5 | 100.00% |
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
tk->xtime_sec += ts->tv_sec;
tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
tk_normalize_xtime(tk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 45 | 91.84% | 4 | 66.67% |
Peter Zijlstra | 2 | 4.08% | 1 | 16.67% |
Thomas Gleixner | 2 | 4.08% | 1 | 16.67% |
Total | 49 | 100.00% | 6 | 100.00% |
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
struct timespec64 tmp;
/*
* Verify consistency of: offset_real = -wall_to_monotonic
* before modifying anything
*/
set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
-tk->wall_to_monotonic.tv_nsec);
WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
tk->wall_to_monotonic = wtm;
set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
tk->offs_real = timespec64_to_ktime(tmp);
tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 103 | 100.00% | 4 | 100.00% |
Total | 103 | 100.00% | 4 | 100.00% |
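As a concrete example of the invariant checked above: if wall time is 1000.5s and the monotonic clock reads 30.0s, then wall_to_monotonic is -970.5s and offs_real must be +970.5s, so that mono + offs_real yields wall time.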
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 20 | 71.43% | 1 | 50.00% |
Thomas Gleixner | 8 | 28.57% | 1 | 50.00% |
Total | 28 | 100.00% | 2 | 100.00% |
/*
* tk_clock_read - atomic clocksource read() helper
*
* This helper is necessary in the read paths because, while the
* seqlock ensures we don't return a bad value while structures are
* updated, it doesn't protect against potential crashes. There is the
* possibility that the tkr's clocksource may change between the read
* reference and the clock reference passed to the read function. This
* can cause crashes if the wrong clocksource is passed to the wrong
* read function.
* This isn't necessary when holding the timekeeper_lock or doing
* a read of the fast-timekeeper tkrs (which is protected by its own
* locking and update logic).
*/
static inline u64 tk_clock_read(struct tk_read_base *tkr)
{
struct clocksource *clock = READ_ONCE(tkr->clock);
return clock->read(clock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
u64 max_cycles = tk->tkr_mono.clock->max_cycles;
const char *name = tk->tkr_mono.clock->name;
if (offset > max_cycles) {
printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
offset, name, max_cycles);
printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
} else {
if (offset > (max_cycles >> 1)) {
printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
offset, name, max_cycles >> 1);
printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
}
}
if (tk->underflow_seen) {
if (jiffies - tk->last_warning > WARNING_FREQ) {
printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
printk_deferred(" Your kernel is probably still fine.\n");
tk->last_warning = jiffies;
}
tk->underflow_seen = 0;
}
if (tk->overflow_seen) {
if (jiffies - tk->last_warning > WARNING_FREQ) {
printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
printk_deferred(" Your kernel is probably still fine.\n");
tk->last_warning = jiffies;
}
tk->overflow_seen = 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 188 | 97.41% | 4 | 57.14% |
Thomas Gleixner | 2 | 1.04% | 1 | 14.29% |
Peter Zijlstra | 2 | 1.04% | 1 | 14.29% |
Masanari Iida | 1 | 0.52% | 1 | 14.29% |
Total | 193 | 100.00% | 7 | 100.00% |
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
struct timekeeper *tk = &tk_core.timekeeper;
u64 now, last, mask, max, delta;
unsigned int seq;
/*
* Since we're called holding a seqlock, the data may shift
* under us while we're doing the calculation. This can cause
* false positives, since we'd note a problem but throw the
* results away. So nest another seqlock here to atomically
* grab the points we are checking with.
*/
do {
seq = read_seqcount_begin(&tk_core.seq);
now = tk_clock_read(tkr);
last = tkr->cycle_last;
mask = tkr->mask;
max = tkr->clock->max_cycles;
} while (read_seqcount_retry(&tk_core.seq, seq));
delta = clocksource_delta(now, last, mask);
/*
* Try to catch underflows by checking if we are seeing small
* mask-relative negative values.
*/
if (unlikely((~delta & mask) < (mask >> 3))) {
tk->underflow_seen = 1;
delta = 0;
}
/* Cap delta value to the max_cycles values to avoid mult overflows */
if (unlikely(delta > max)) {
tk->overflow_seen = 1;
delta = tkr->clock->max_cycles;
}
return delta;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 160 | 98.77% | 5 | 83.33% |
Thomas Gleixner | 2 | 1.23% | 1 | 16.67% |
Total | 162 | 100.00% | 6 | 100.00% |
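The underflow test above flags deltas that are "small negative" values modulo the counter mask, i.e. whose complement lands in the bottom 1/8 of the mask range. A quick standalone demonstration, assuming a 32-bit counter mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mask = 0xFFFFFFFFULL;        /* assumed 32-bit counter */
    /* a racing read landed slightly behind cycle_last: delta of -5 */
    uint64_t delta = (0ULL - 5) & mask;   /* 0xFFFFFFFB */

    if ((~delta & mask) < (mask >> 3))
        printf("underflow: delta treated as 0\n");
    else
        printf("genuine large delta\n");
    return 0;
}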
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 13 | 92.86% | 1 | 50.00% |
Thomas Gleixner | 1 | 7.14% | 1 | 50.00% |
Total | 14 | 100.00% | 2 | 100.00% |
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
u64 cycle_now, delta;
/* read clocksource */
cycle_now = tk_clock_read(tkr);
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
return delta;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 42 | 95.45% | 2 | 66.67% |
Thomas Gleixner | 2 | 4.55% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
#endif
/**
* tk_setup_internals - Set up internals to use clocksource clock.
*
* @tk: The target timekeeper to setup.
* @clock: Pointer to clocksource.
*
* Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
* pair and interval request.
*
* Unless you're the timekeeping code, you should not be using this!
*/
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
u64 interval;
u64 tmp, ntpinterval;
struct clocksource *old_clock;
++tk->cs_was_changed_seq;
old_clock = tk->tkr_mono.clock;
tk->tkr_mono.clock = clock;
tk->tkr_mono.mask = clock->mask;
tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk->tkr_raw.clock = clock;
tk->tkr_raw.mask = clock->mask;
tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
/* Do the ns -> cycle conversion first, using original mult */
tmp = NTP_INTERVAL_LENGTH;
tmp <<= clock->shift;
ntpinterval = tmp;
tmp += clock->mult/2;
do_div(tmp, clock->mult);
if (tmp == 0)
tmp = 1;
interval = (u64) tmp;
tk->cycle_interval = interval;
/* Go back from cycles -> shifted ns */
tk->xtime_interval = interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
tk->raw_interval = interval * clock->mult;
/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
int shift_change = clock->shift - old_clock->shift;
if (shift_change < 0) {
tk->tkr_mono.xtime_nsec >>= -shift_change;
tk->tkr_raw.xtime_nsec >>= -shift_change;
} else {
tk->tkr_mono.xtime_nsec <<= shift_change;
tk->tkr_raw.xtime_nsec <<= shift_change;
}
}
tk->tkr_mono.shift = clock->shift;
tk->tkr_raw.shift = clock->shift;
tk->ntp_error = 0;
tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
/*
* The timekeeper keeps its own mult values for the currently
* active clocksource. These value will be adjusted via NTP
* to counteract clock drifting.
*/
tk->tkr_mono.mult = clock->mult;
tk->tkr_raw.mult = clock->mult;
tk->ntp_err_mult = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 113 | 34.66% | 3 | 17.65% |
John Stultz | 107 | 32.82% | 6 | 35.29% |
Peter Zijlstra | 65 | 19.94% | 2 | 11.76% |
Thomas Gleixner | 24 | 7.36% | 4 | 23.53% |
Kasper Pedersen | 12 | 3.68% | 1 | 5.88% |
Christopher S. Hall | 5 | 1.53% | 1 | 5.88% |
Total | 326 | 100.00% | 17 | 100.00% |
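To make the ns -> cycles -> ns round trip above concrete, here is a standalone sketch with a hypothetical 24 MHz clocksource (mult and shift chosen so that ns = (cycles * mult) >> shift) and an assumed HZ=100 tick, i.e. NTP_INTERVAL_LENGTH = 10 ms:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t shift = 24;
    uint32_t mult  = 699050667;            /* ~41.667 ns/cycle * 2^24 */
    uint64_t ntp_interval_len = 10000000;  /* 10 ms tick, assuming HZ=100 */

    /* ns -> cycles, rounded to nearest, as in tk_setup_internals() */
    uint64_t tmp = (ntp_interval_len << shift) + mult / 2;
    uint64_t cycle_interval = tmp / mult;  /* 240000 cycles per tick */

    /* back from cycles -> shifted ns; the leftover feeds the NTP error */
    uint64_t xtime_interval  = cycle_interval * mult;
    int64_t  xtime_remainder =
        (int64_t)((ntp_interval_len << shift) - xtime_interval);

    printf("cycles per tick: %llu\n", (unsigned long long)cycle_interval);
    printf("shifted-ns remainder per tick: %lld\n",
           (long long)xtime_remainder); /* may be negative; it is s64 in the kernel */
    return 0;
}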
/* Timekeeper helper functions. */
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Warren | 11 | 91.67% | 1 | 50.00% |
Thomas Gleixner | 1 | 8.33% | 1 | 50.00% |
Total | 12 | 100.00% | 2 | 100.00% |
#endif
static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
u64 nsec;
nsec = delta * tkr->mult + tkr->xtime_nsec;
nsec >>= tkr->shift;
/* If arch requires, add in get_arch_timeoffset() */
return nsec + arch_gettimeoffset();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 14 | 32.56% | 6 | 50.00% |
John Stultz | 14 | 32.56% | 3 | 25.00% |
Christopher S. Hall | 7 | 16.28% | 1 | 8.33% |
Martin Schwidefsky | 7 | 16.28% | 1 | 8.33% |
Stephen Warren | 1 | 2.33% | 1 | 8.33% |
Total | 43 | 100.00% | 12 | 100.00% |
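In other words, a readout is the stored shifted nanoseconds plus delta * mult, all shifted down at the end; capping delta at max_cycles (see timekeeping_get_delta() above) is what keeps the delta * mult product from overflowing 64 bits. Reusing the hypothetical 24 MHz clocksource from the earlier sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t shift = 24, mult = 699050667;
    uint64_t xtime_nsec = 123456789ULL << shift; /* shifted ns at last update */
    uint64_t delta = 48000;                      /* cycles since last update: 2 ms */

    uint64_t nsec = (delta * mult + xtime_nsec) >> shift;
    printf("%llu ns\n", (unsigned long long)nsec); /* prints 125456789 ns */
    return 0;
}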
static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
u64 delta;
delta = timekeeping_get_delta(tkr);
return timekeeping_delta_to_ns(tkr, delta);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 28 | 93.33% | 1 | 33.33% |
Thomas Gleixner | 2 | 6.67% | 2 | 66.67% |
Total | 30 | 100.00% | 3 | 100.00% |
static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
u64 delta;
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
return timekeeping_delta_to_ns(tkr, delta);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 39 | 92.86% | 1 | 33.33% |
Thomas Gleixner | 3 | 7.14% | 2 | 66.67% |
Total | 42 | 100.00% | 3 | 100.00% |
/**
* update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
* @tkr: Timekeeping readout base from which we take the update
*
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
*
* Employ the latch technique; see @raw_write_seqcount_latch.
*
* So if a NMI hits the update of base[0] then it will use base[1]
* which is still consistent. In the worst case this can result in a
* slightly wrong timestamp (a few nanoseconds). See
* @ktime_get_mono_fast_ns.
*/
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
struct tk_read_base *base = tkf->base;
/* Force readers off to base[1] */
raw_write_seqcount_latch(&tkf->seq);
/* Update base[0] */
memcpy(base, tkr, sizeof(*base));
/* Force readers back to base[0] */
raw_write_seqcount_latch(&tkf->seq);
/* Update base[1] */
memcpy(base + 1, base, sizeof(*base));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 60 | 82.19% | 1 | 33.33% |
Peter Zijlstra | 11 | 15.07% | 1 | 33.33% |
Rafael J. Wysocki | 2 | 2.74% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
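A userspace analogue of this latch can make the mechanism easier to see. This is a sketch only: the ordering here leans on C11 seq_cst atomics, whereas the kernel gets the required write barriers from raw_write_seqcount_latch(), and the unsynchronized struct copies below are tolerated data races for illustration:

#include <stdatomic.h>

struct sample { long a, b; };

static struct {
    atomic_uint seq;
    struct sample base[2];
} fast;

/* writer side, mirroring update_fast_timekeeper() */
static void latch_update(const struct sample *s)
{
    atomic_fetch_add(&fast.seq, 1); /* force readers off to base[1] */
    fast.base[0] = *s;              /* update base[0] */
    atomic_fetch_add(&fast.seq, 1); /* force readers back to base[0] */
    fast.base[1] = fast.base[0];    /* update base[1] */
}

/* reader side, mirroring __ktime_get_fast_ns(): never blocks, only retries */
static struct sample latch_read(void)
{
    struct sample s;
    unsigned int seq;

    do {
        seq = atomic_load(&fast.seq);
        s = fast.base[seq & 0x01];  /* lowest bit picks the stable copy */
    } while (atomic_load(&fast.seq) != seq);
    return s;
}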
/**
* ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
*
* This timestamp is not guaranteed to be monotonic across an update.
* The timestamp is calculated by:
*
* now = base_mono + clock_delta * slope
*
* So if the update lowers the slope, readers who are forced to the
* not yet updated second array are still using the old steeper slope.
*
* tmono
* ^
* | o n
* | o n
* | u
* | o
* |o
* |12345678---> reader order
*
* o = old slope
* u = update
* n = new slope
*
* So reader 6 will observe time going backwards versus reader 5.
*
* While other CPUs are likely to be able to observe this, the only way
* for a CPU-local observation is when an NMI hits in the middle of
* the update. Timestamps taken from that NMI context might be ahead
* of the following timestamps. Callers need to be aware of that and
* deal with it.
*/
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
struct tk_read_base *tkr;
unsigned int seq;
u64 now;
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);
now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
return now;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 61 | 62.89% | 1 | 14.29% |
John Stultz | 21 | 21.65% | 3 | 42.86% |
Peter Zijlstra | 15 | 15.46% | 3 | 42.86% |
Total | 97 | 100.00% | 7 | 100.00% |
u64 ktime_get_mono_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_mono);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
u64 ktime_get_raw_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_raw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
/**
* ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
*
* To keep it NMI safe since we're accessing from tracing, we're not using a
* separate timekeeper with updates to monotonic clock and boot offset
* protected with seqlocks. This has the following minor side effects:
*
* (1) It's possible that a timestamp is taken after the boot offset is updated
* but before the timekeeper is updated. If this happens, the new boot offset
* is added to the old timekeeping making the clock appear to update slightly
* earlier:
* CPU 0 CPU 1
* timekeeping_inject_sleeptime64()
* __timekeeping_inject_sleeptime(tk, delta);
* timestamp();
* timekeeping_update(tk, TK_CLEAR_NTP...);
*
* (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
*/
u64 notrace ktime_get_boot_fast_ns(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Joel Fernandes | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
/*
* See comment for __ktime_get_fast_ns() vs. timestamp ordering
*/
static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
{
struct tk_read_base *tkr;
unsigned int seq;
u64 now;
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base_real);
now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
return now;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 97 | 100.00% | 1 | 100.00% |
Total | 97 | 100.00% | 1 | 100.00% |
/**
* ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
*/
u64 ktime_get_real_fast_ns(void)
{
return __ktime_get_real_fast_ns(&tk_fast_mono);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
*
* It generally is unsafe to access the clocksource after timekeeping has been
* suspended, so take a snapshot of the readout base of @tk and use it as the
* fast timekeeper's readout base while suspended. It will return the same
* number of cycles every time until timekeeping is resumed at which time the
* proper readout base for the fast timekeeper will be restored automatically.
*/
static void halt_fast_timekeeper(struct timekeeper *tk)
{
static struct tk_read_base tkr_dummy;
struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
cycles_at_suspend = tk_clock_read(tkr);
tkr_dummy.clock = &dummy_clock;
tkr_dummy.base_real = tkr->base + tk->offs_real;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 54 | 49.09% | 1 | 16.67% |
Peter Zijlstra | 37 | 33.64% | 3 | 50.00% |
Thomas Gleixner | 12 | 10.91% | 1 | 16.67% |
John Stultz | 7 | 6.36% | 1 | 16.67% |
Total | 110 | 100.00% | 6 | 100.00% |
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcelo Tosatti | 20 | 83.33% | 1 | 50.00% |
David Vrabel | 4 | 16.67% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
/**
* pvclock_gtod_register_notifier - register a pvclock timedata update listener
*/
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
int ret;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
update_pvclock_gtod(tk, true);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcelo Tosatti | 55 | 87.30% | 1 | 20.00% |
Thomas Gleixner | 6 | 9.52% | 3 | 60.00% |
David Vrabel | 2 | 3.17% | 1 | 20.00% |
Total | 63 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
* pvclock_gtod_unregister_notifier - unregister a pvclock
* timedata update listener
*/
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
unsigned long flags;
int ret;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcelo Tosatti | 42 | 91.30% | 1 | 33.33% |
Thomas Gleixner | 4 | 8.70% | 2 | 66.67% |
Total | 46 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
/*
* tk_update_leap_state - helper to update the next_leap_ktime
*/
static inline void tk_update_leap_state(struct timekeeper *tk)
{
tk->next_leap_ktime = ntp_get_next_leap();
if (tk->next_leap_ktime != KTIME_MAX)
/* Convert to monotonic time */
tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
/*
* Update the ktime_t based scalar nsec members of the timekeeper
*/
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
u64 seconds;
u32 nsec;
/*
* The xtime based monotonic readout is:
* nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
* The ktime based monotonic readout is:
* nsec = base_mono + now();
* ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
*/
seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
nsec = (u32) tk->wall_to_monotonic.tv_nsec;
tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
/*
* The sum of the nanoseconds portions of xtime and
* wall_to_monotonic can be greater/equal one second. Take
* this into account before updating tk->ktime_sec.
*/
nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
if (nsec >= NSEC_PER_SEC)
seconds++;
tk->ktime_sec = seconds;
/* Update the monotonic raw base */
tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Heena Sirwani | 47 | 41.59% | 1 | 16.67% |
Thomas Gleixner | 46 | 40.71% | 2 | 33.33% |
John Stultz | 16 | 14.16% | 2 | 33.33% |
Peter Zijlstra | 4 | 3.54% | 1 | 16.67% |
Total | 113 | 100.00% | 6 | 100.00% |
/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
if (action & TK_CLEAR_NTP) {
tk->ntp_error = 0;
ntp_clear();
}
tk_update_leap_state(tk);
tk_update_ktime_data(tk);
update_vsyscall(tk);
update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
if (action & TK_CLOCK_WAS_SET)
tk->clock_was_set_seq++;
/*
* The mirroring of the data to the shadow-timekeeper needs
* to happen last here to ensure we don't over-write the
* timekeeper structure on the next update with stale data
*/
if (action & TK_MIRROR)
memcpy(&shadow_timekeeper, &tk_core.timekeeper,
sizeof(tk_core.timekeeper));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 58 | 44.62% | 5 | 31.25% |
John Stultz | 40 | 30.77% | 4 | 25.00% |
Peter Zijlstra | 15 | 11.54% | 3 | 18.75% |
David Vrabel | 10 | 7.69% | 2 | 12.50% |
Marcelo Tosatti | 5 | 3.85% | 1 | 6.25% |
Rafael J. Wysocki | 2 | 1.54% | 1 | 6.25% |
Total | 130 | 100.00% | 16 | 100.00% |
/**
* timekeeping_forward_now - update clock to the current time
*
* Forward the current clock to update its state since the last call to
* update_wall_time(). This is useful before significant clock changes,
* as it avoids having to deal with this time offset explicitly.
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
u64 cycle_now, delta;
cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
/* If arch requires, add in get_arch_timeoffset() */
tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
/* If arch requires, add in get_arch_timeoffset() */
tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
tk_normalize_xtime(tk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 70 | 53.03% | 7 | 31.82% |
Thomas Gleixner | 27 | 20.45% | 7 | 31.82% |
Peter Zijlstra | 22 | 16.67% | 2 | 9.09% |
Martin Schwidefsky | 6 | 4.55% | 3 | 13.64% |
Andreas Schwab | 3 | 2.27% | 1 | 4.55% |
Roman Zippel | 3 | 2.27% | 1 | 4.55% |
Stephen Warren | 1 | 0.76% | 1 | 4.55% |
Total | 132 | 100.00% | 22 | 100.00% |
/**
* __getnstimeofday64 - Returns the time of day in a timespec64.
* @ts: pointer to the timespec64 to be set
*
* Updates the time of day in the timespec.
* Returns 0 on success, or -ve when suspended (timespec will be undefined).
*/
int __getnstimeofday64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long seq;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->xtime_sec;
nsecs = timekeeping_get_ns(&tk->tkr_mono);
} while (read_seqcount_retry(&tk_core.seq, seq));
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsecs);
/*
* Do not bail out early, in case there were callers still using
* the value, even in the face of the WARN_ON.
*/
if (unlikely(timekeeping_suspended))
return -EAGAIN;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 64 | 64.65% | 5 | 38.46% |
Kees Cook | 17 | 17.17% | 1 | 7.69% |
Thomas Gleixner | 16 | 16.16% | 5 | 38.46% |
Martin Schwidefsky | 1 | 1.01% | 1 | 7.69% |
Peter Zijlstra | 1 | 1.01% | 1 | 7.69% |
Total | 99 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(__getnstimeofday64);
/**
* getnstimeofday64 - Returns the time of day in a timespec64.
* @ts: pointer to the timespec64 to be set
*
* Returns the time of day in a timespec64 (WARN if suspended).
*/
void getnstimeofday64(struct timespec64 *ts)
{
WARN_ON(__getnstimeofday64(ts));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kees Cook | 14 | 77.78% | 1 | 33.33% |
Thomas Gleixner | 3 | 16.67% | 1 | 33.33% |
John Stultz | 1 | 5.56% | 1 | 33.33% |
Total | 18 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(getnstimeofday64);
ktime_t ktime_get(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base;
u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
base = tk->tkr_mono.base;
nsecs = timekeeping_get_ns(&tk->tkr_mono);
} while (read_seqcount_retry(&tk_core.seq, seq));
return ktime_add_ns(base, nsecs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 47 | 55.95% | 1 | 9.09% |
Thomas Gleixner | 20 | 23.81% | 6 | 54.55% |
John Stultz | 14 | 16.67% | 3 | 27.27% |
Peter Zijlstra | 3 | 3.57% | 1 | 9.09% |
Total | 84 | 100.00% | 11 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get);
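ktime_get() is the canonical instance of the read pattern used throughout this file: sample the sequence count, read, and retry if a writer interleaved. A minimal userspace sketch of that seqcount discipline (simplified; the kernel's read_seqcount_begin() additionally spins while the count is odd and inserts the needed barriers):

#include <stdatomic.h>

static atomic_uint tk_seq; /* even: stable, odd: write in progress */
static long wall_sec;      /* stand-in for the protected timekeeper data */

/* writer: runs with updates serialized, as under timekeeper_lock */
static void seq_write(long v)
{
    atomic_fetch_add(&tk_seq, 1); /* odd: readers will retry */
    wall_sec = v;
    atomic_fetch_add(&tk_seq, 1); /* even again: new data published */
}

/* reader: same shape as the do/while loops in this file */
static long seq_read(void)
{
    unsigned int seq;
    long v;

    do {
        while ((seq = atomic_load(&tk_seq)) & 1)
            ;                     /* writer active: wait for an even count */
        v = wall_sec;
    } while (atomic_load(&tk_seq) != seq);
    return v;
}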
u32 ktime_get_resolution_ns(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
u32 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
} while (read_seqcount_retry(&tk_core.seq, seq));
return nsecs;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Geyer | 72 | 100.00% | 1 | 100.00% |
Total | 72 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
static ktime_t *offsets[TK_OFFS_MAX] = {
[TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
[TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
[TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
};
ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base, *offset = offsets[offs];
u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
base = ktime_add(tk->tkr_mono.base, *offset);
nsecs = timekeeping_get_ns(&tk->tkr_mono);
} while (read_seqcount_retry(&tk_core.seq, seq));
return ktime_add_ns(base, nsecs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 97 | 97.00% | 4 | 80.00% |
Peter Zijlstra | 3 | 3.00% | 1 | 20.00% |
Total | 100 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_with_offset);
/**
* ktime_mono_to_any() - convert monotonic time to any other time
* @tmono: time to convert.
* @offs: which offset to use
*/
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
ktime_t *offset = offsets[offs];
unsigned long seq;
ktime_t tconv;
do {
seq = read_seqcount_begin(&tk_core.seq);
tconv = ktime_add(tmono, *offset);
} while (read_seqcount_retry(&tk_core.seq, seq));
return tconv;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 67 | 100.00% | 1 | 100.00% |
Total | 67 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
/**
* ktime_get_raw - Returns the raw monotonic time in ktime_t format
*/
ktime_t ktime_get_raw(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
base = tk->tkr_raw.base;
nsecs = timekeeping_get_ns(&tk->tkr_raw);
} while (read_seqcount_retry(&tk_core.seq, seq));
return ktime_add_ns(base, nsecs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 72 | 91.14% | 2 | 66.67% |
Peter Zijlstra | 7 | 8.86% | 1 | 33.33% |
Total | 79 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_raw);
/**
* ktime_get_ts64 - get the monotonic clock in timespec64 format
* @ts: pointer to timespec variable
*
* The function calculates the monotonic clock from the realtime
* clock and the wall_to_monotonic offset and stores the result
* in normalized timespec64 format in the variable pointed to by @ts.
*/
void ktime_get_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 tomono;
unsigned int seq;
u64 nsec;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->xtime_sec;
nsec = timekeeping_get_ns(&tk->tkr_mono);
tomono = tk->wall_to_monotonic;
} while (read_seqcount_retry(&tk_core.seq, seq));
ts->tv_sec += tomono.tv_sec;
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 55 | 49.55% | 2 | 15.38% |
John Stultz | 31 | 27.93% | 5 | 38.46% |
Thomas Gleixner | 24 | 21.62% | 5 | 38.46% |
Peter Zijlstra | 1 | 0.90% | 1 | 7.69% |
Total | 111 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_ts64);
/**
* ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
*
* Returns the seconds portion of CLOCK_MONOTONIC with a single non
* serialized read. tk->ktime_sec is of type 'unsigned long' so this
* works on both 32 and 64 bit systems. On 32 bit systems the readout
* covers ~136 years of uptime which should be enough to prevent
* premature wrap arounds.
*/
time64_t ktime_get_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
WARN_ON(timekeeping_suspended);
return tk->ktime_sec;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Heena Sirwani | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_seconds);
/**
* ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
*
* Returns the wall clock seconds since 1970. This replaces the
* get_seconds() interface which is not y2038 safe on 32bit systems.
*
* For 64bit systems the fast access to tk->xtime_sec is preserved. On
* 32bit systems the access must be protected with the sequence
* counter to provide "atomic" access to the 64bit tk->xtime_sec
* value.
*/
time64_t ktime_get_real_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
time64_t seconds;
unsigned int seq;
if (IS_ENABLED(CONFIG_64BIT))
return tk->xtime_sec;
do {
seq = read_seqcount_begin(&tk_core.seq);
seconds = tk->xtime_sec;
} while (read_seqcount_retry(&tk_core.seq, seq));
return seconds;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Heena Sirwani | 71 | 100.00% | 1 | 100.00% |
Total | 71 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
/**
* __ktime_get_real_seconds - The same as ktime_get_real_seconds,
* but without the sequence counter protection. This internal function
* is called only when the timekeeping lock is already held.
*/
time64_t __ktime_get_real_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return tk->xtime_sec;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
DengChao | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
/**
* ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
* @systime_snapshot: pointer to struct receiving the system time snapshot
*/
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long seq;
ktime_t base_raw;
ktime_t base_real;
u64 nsec_raw;
u64 nsec_real;
u64 now;
WARN_ON_ONCE(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
now = tk_clock_read(&tk->tkr_mono);
systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
base_real = ktime_add(tk->tkr_mono.base,
tk_core.timekeeper.offs_real);
base_raw = tk->tkr_raw.base;
nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
} while (read_seqcount_retry(&tk_core.seq, seq));
systime_snapshot->cycles = now;
systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 168 | 97.11% | 3 | 50.00% |
Thomas Gleixner | 3 | 1.73% | 2 | 33.33% |
John Stultz | 2 | 1.16% | 1 | 16.67% |
Total | 173 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
u64 tmp, rem;
tmp = div64_u64_rem(*base, div, &rem);
if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
return -EOVERFLOW;
tmp *= mult;
rem *= mult;
do_div(rem, div);
*base = tmp + rem;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 109 | 100.00% | 1 | 100.00% |
Total | 109 | 100.00% | 1 | 100.00% |
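scale64_check_overflow() computes *base = (*base * mult) / div without needing a 128-bit intermediate: it splits *base into quotient and remainder by div, scales both parts, and the fls64() checks reject inputs whose partial products would not fit in 64 bits. A standalone cross-check with hypothetical values, using 128-bit math as the reference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t base = 123456789012345ULL; /* hypothetical ns correction */
    uint64_t mult = 40000, div = 90000; /* partial/total history cycles */

    /* reference result via 128-bit arithmetic */
    unsigned __int128 wide = (unsigned __int128)base * mult / div;

    /* the function's split: base = q*div + r, scale the parts separately */
    uint64_t q = base / div, r = base % div;
    uint64_t scaled = q * mult + (r * mult) / div;

    /* identical: floor((q*div + r)*mult/div) == q*mult + floor(r*mult/div) */
    printf("%llu %llu\n", (unsigned long long)(uint64_t)wide,
           (unsigned long long)scaled);
    return 0;
}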
/**
* adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
* @history: Snapshot representing start of history
* @partial_history_cycles: Cycle offset into history (fractional part)
* @total_history_cycles: Total history length in cycles
* @discontinuity: True indicates clock was set on history period
* @ts: Cross timestamp that should be adjusted using
* partial/total ratio
*
* Helper function used by get_device_system_crosststamp() to correct the
* crosstimestamp corresponding to the start of the current interval to the
* system counter value (timestamp point) provided by the driver. The
* total_history_* quantities are the total history starting at the provided
* reference point and ending at the start of the current interval. The cycle
* count between the driver timestamp point and the start of the current
* interval is partial_history_cycles.
*/
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
u64 partial_history_cycles,
u64 total_history_cycles,
bool discontinuity,
struct system_device_crosststamp *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
u64 corr_raw, corr_real;
bool interp_forward;
int ret;
if (total_history_cycles == 0 || partial_history_cycles == 0)
return 0;
/* Interpolate shortest distance from beginning or end of history */
interp_forward = partial_history_cycles > total_history_cycles / 2;
partial_history_cycles = interp_forward ?
total_history_cycles - partial_history_cycles :
partial_history_cycles;
/*
* Scale the monotonic raw time delta by:
* partial_history_cycles / total_history_cycles
*/
corr_raw = (u64)ktime_to_ns(
ktime_sub(ts->sys_monoraw, history->raw));
ret = scale64_check_overflow(partial_history_cycles,
total_history_cycles, &corr_raw);
if (ret)
return ret;
/*
* If there is a discontinuity in the history, scale monotonic raw
* correction by:
* mult(real)/mult(raw) yielding the realtime correction
* Otherwise, calculate the realtime correction similar to monotonic
* raw calculation
*/
if (discontinuity) {
corr_real = mul_u64_u32_div
(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
} else {
corr_real = (u64)ktime_to_ns(
ktime_sub(ts->sys_realtime, history->real));
ret = scale64_check_overflow(partial_history_cycles,
total_history_cycles, &corr_real);
if (ret)
return ret;
}
/* Fixup monotonic raw and real time time values */
if (interp_forward) {
ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
ts->sys_realtime = ktime_add_ns(history->real, corr_real);
} else {
ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 247 | 99.20% | 1 | 50.00% |
Thomas Gleixner | 2 | 0.80% | 1 | 50.00% |
Total | 249 | 100.00% | 2 | 100.00% |
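As a worked example: with a 10-second history and a device counter value falling 2 seconds before the end of it (partial_history_cycles = 2s worth of cycles, total = 10s), partial/total = 0.2, interp_forward is false, and 20% of each clock's delta over the history is subtracted from the crosstimestamp at the end; had the point fallen 2 seconds after the beginning instead, interp_forward would be true and the same 20% correction would be added onto history_begin.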
/*
* cycle_between - true if test occurs chronologically between before and after
*/
static bool cycle_between(u64 before, u64 test, u64 after)
{
if (test > before && test < after)
return true;
if (test < before && before > after)
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 41 | 93.18% | 1 | 50.00% |
Thomas Gleixner | 3 | 6.82% | 1 | 50.00% |
Total | 44 | 100.00% | 2 | 100.00% |
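A quick check of the wraparound case this helper covers, in a standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cycle_between(uint64_t before, uint64_t test, uint64_t after)
{
    if (test > before && test < after)
        return true;
    if (test < before && before > after)
        return true;
    return false;
}

int main(void)
{
    printf("%d\n", cycle_between(100, 150, 200));     /* 1: plain ordering */
    printf("%d\n", cycle_between(0xFFF0, 0x5, 0x10)); /* 1: counter wrapped */
    printf("%d\n", cycle_between(100, 250, 200));     /* 0: past 'after' */
    return 0;
}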
/**
* get_device_system_crosststamp - Synchronously capture system/device timestamp
* @get_time_fn: Callback to get simultaneous device time and
* system counter from the device driver
* @ctx: Context passed to get_time_fn()
* @history_begin: Historical reference point used to interpolate system
* time when counter provided by the driver is before the current interval
* @xtstamp: Receives simultaneously captured system and device time
*
* Reads a timestamp from a device and correlates it to system time
*/
int get_device_system_crosststamp(int (*get_time_fn)
(ktime_t *device_time,
struct system_counterval_t *sys_counterval,
void *ctx),
void *ctx,
struct system_time_snapshot *history_begin,
struct system_device_crosststamp *xtstamp)
{
struct system_counterval_t system_counterval;
struct timekeeper *tk = &tk_core.timekeeper;
u64 cycles, now, interval_start;
unsigned int clock_was_set_seq = 0;
ktime_t base_real, base_raw;
u64 nsec_real, nsec_raw;
u8 cs_was_changed_seq;
unsigned long seq;
bool do_interp;
int ret;
do {
seq = read_seqcount_begin(&tk_core.seq);
/*
* Try to synchronously capture device time and a system
* counter value calling back into the device driver
*/
ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
if (ret)
return ret;
/*
* Verify that the clocksource associated with the captured
* system counter value is the same as the currently installed
* timekeeper clocksource
*/
if (tk->tkr_mono.clock != system_counterval.cs)
return -ENODEV;
cycles = system_counterval.cycles;
/*
* Check whether the system counter value provided by the
* device driver is on the current timekeeping interval.
*/
now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
if (!cycle_between(interval_start, cycles, now)) {
clock_was_set_seq = tk->clock_was_set_seq;
cs_was_changed_seq = tk->cs_was_changed_seq;
cycles = interval_start;
do_interp = true;
} else {
do_interp = false;
}
base_real = ktime_add(tk->tkr_mono.base,
tk_core.timekeeper.offs_real);
base_raw = tk->tkr_raw.base;
nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
system_counterval.cycles);
nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
system_counterval.cycles);
} while (read_seqcount_retry(&tk_core.seq, seq));
xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
/*
* Interpolate if necessary, adjusting back from the start of the
* current interval
*/
if (do_interp) {
u64 partial_history_cycles, total_history_cycles;
bool discontinuity;
/*
* Check that the counter value occurs after the provided
* history reference and that the history doesn't cross a
* clocksource change
*/
if (!history_begin ||
!cycle_between(history_begin->cycles,
system_counterval.cycles, cycles) ||
history_begin->cs_was_changed_seq != cs_was_changed_seq)
return -EINVAL;
partial_history_cycles = cycles - system_counterval.cycles;
total_history_cycles = cycles - history_begin->cycles;
discontinuity =
history_begin->clock_was_set_seq != clock_was_set_seq;
ret = adjust_historical_crosststamp(history_begin,
partial_history_cycles,
total_history_cycles,
discontinuity, xtstamp);
if (ret)
return ret;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christopher S. Hall | 383 | 98.21% | 2 | 33.33% |
Thomas Gleixner | 3 | 0.77% | 2 | 33.33% |
John Stultz | 2 | 0.51% | 1 | 16.67% |
Ingo Molnar | 2 | 0.51% | 1 | 16.67% |
Total | 390 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
/**
* do_gettimeofday - Returns the time of day in a timeval
* @tv: pointer to the timeval to be set
*
* NOTE: Users should be converted to using getnstimeofday()
*/
void do_gettimeofday(struct timeval *tv)
{
struct timespec64 now;
getnstimeofday64(&now);
tv->tv_sec = now.tv_sec;
tv->tv_usec = now.tv_nsec/1000;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 36 | 94.74% | 1 | 50.00% |
Thomas Gleixner | 2 | 5.26% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(do_gettimeofday);
/**
* do_settimeofday64 - Sets the time of day.
* @ts: pointer to the timespec64 variable containing the new time
*
* Sets the time of day to the new time, updates NTP and notifies hrtimers
*/
int do_settimeofday64(const struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 ts_delta, xt;
unsigned long flags;
int ret = 0;
if (!timespec64_valid_strict(ts))
return -EINVAL;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
xt = tk_xtime(tk);
ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
ret = -EINVAL;
goto out;
}
tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
tk_set_xtime(tk, ts);
out:
timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 96 | 52.75% | 9 | 45.00% |
Wang YanQing | 33 | 18.13% | 1 | 5.00% |
Thomas Gleixner | 23 | 12.64% | 4 | 20.00% |
Roman Zippel | 15 | 8.24% | 1 | 5.00% |
Xunlei Pang | 8 | 4.40% | 1 | 5.00% |
David Vrabel | 5 | 2.75% | 2 | 10.00% |
Martin Schwidefsky | 1 | 0.55% | 1 | 5.00% |
Richard Cochran | 1 | 0.55% | 1 | 5.00% |
Total | 182 | 100.00% | 20 | 100.00% |
EXPORT_SYMBOL(do_settimeofday64);
/**
* timekeeping_inject_offset - Adds or subtracts from the current time.
* @ts: pointer to the timespec64 variable containing the offset
*
* Adds or subtracts an offset value from the current time.
*/
static int timekeeping_inject_offset(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
struct timespec64 tmp;
int ret = 0;
if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
/* Make sure the proposed value is valid */
tmp = timespec64_add(tk_xtime(tk), *ts);
if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
!timespec64_valid_strict(&tmp)) {
ret = -EINVAL;
goto error;
}
tk_xtime_add(tk, ts);
tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
error: /* even if we error out, we forwarded the time, so call update */
timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 120 | 67.80% | 9 | 50.00% |
Thomas Gleixner | 23 | 12.99% | 4 | 22.22% |
Arnd Bergmann | 18 | 10.17% | 2 | 11.11% |
Wang YanQing | 11 | 6.21% | 1 | 5.56% |
David Vrabel | 5 | 2.82% | 2 | 11.11% |
Total | 177 | 100.00% | 18 | 100.00% |
/*
* Indicates if there is an offset between the system clock and the hardware
* clock/persistent clock/rtc.
*/
int persistent_clock_is_local;
/*
* Adjust the time obtained from the CMOS to be UTC time instead of
* local time.
*
* This is ugly, but preferable to the alternatives. Otherwise we
* would either need to write a program to do it in /etc/rc (and risk
* confusion if the program gets run more than once; it would also be
* hard to make the program warp the clock precisely n hours) or
* compile in the timezone information into the kernel. Bad, bad....
*
* - TYT, 1992-01-01
*
* The best thing to do is to keep the CMOS clock in universal time (UTC)
* as real UNIX machines always do it. This avoids all headaches about
* daylight saving times and warping kernel clocks.
*/
void timekeeping_warp_clock(void)
{
if (sys_tz.tz_minuteswest != 0) {
struct timespec64 adjust;
persistent_clock_is_local = 1;
adjust.tv_sec = sys_tz.tz_minuteswest * 60;
adjust.tv_nsec = 0;
timekeeping_inject_offset(&adjust);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnd Bergmann | 44 | 93.62% | 2 | 66.67% |
John Stultz | 3 | 6.38% | 1 | 33.33% |
Total | 47 | 100.00% | 3 | 100.00% |
/**
* __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
*
*/
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
tk->tai_offset = tai_offset;
tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 37 | 97.37% | 3 | 75.00% |
Fengguang Wu | 1 | 2.63% | 1 | 25.00% |
Total | 38 | 100.00% | 4 | 100.00% |
/**
* change_clocksource - Swaps clocksources if a new one is available
*
* Accumulates the current time interval and initializes the new clocksource
*/
static int change_clocksource(void *data)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct clocksource *new, *old;
unsigned long flags;
new = (struct clocksource *) data;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
/*
* If the cs is in module, get a module reference. Succeeds
* for built-in code (owner == NULL) as well.
*/
if (try_module_get(new->owner)) {
if (!new->enable || new->enable(new) == 0) {
old = tk->tkr_mono.clock;
tk_setup_internals(tk, new);
if (old->disable)
old->disable(old);
module_put(old->owner);
} else {
module_put(new->owner);
}
}
timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 54 | 32.34% | 4 | 23.53% |
Thomas Gleixner | 53 | 31.74% | 6 | 35.29% |
Martin Schwidefsky | 37 | 22.16% | 3 | 17.65% |
Magnus Damm | 17 | 10.18% | 1 | 5.88% |
David Vrabel | 5 | 2.99% | 2 | 11.76% |
Peter Zijlstra | 1 | 0.60% | 1 | 5.88% |
Total | 167 | 100.00% | 17 | 100.00% |
/**
* timekeeping_notify - Install a new clock source
* @clock: pointer to the clock source
*
* This function is called from clocksource.c after a new, better clock
* source has been registered. The caller holds the clocksource_mutex.
*/
int timekeeping_notify(struct clocksource *clock)
{
struct timekeeper *tk = &tk_core.timekeeper;
if (tk->tkr_mono.clock == clock)
return 0;
stop_machine(change_clocksource, clock, NULL);
tick_clock_notify();
return tk->tkr_mono.clock == clock ? 0 : -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 23 | 38.98% | 1 | 14.29% |
Thomas Gleixner | 20 | 33.90% | 3 | 42.86% |
John Stultz | 14 | 23.73% | 2 | 28.57% |
Peter Zijlstra | 2 | 3.39% | 1 | 14.29% |
Total | 59 | 100.00% | 7 | 100.00% |
/**
* getrawmonotonic64 - Returns the raw monotonic time in a timespec64
* @ts: pointer to the timespec64 to be set
*
* Returns the raw monotonic time (completely un-modified by ntp)
*/
void getrawmonotonic64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long seq;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->raw_sec;
nsecs = timekeeping_get_ns(&tk->tkr_raw);
} while (read_seqcount_retry(&tk_core.seq, seq));
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsecs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 69 | 82.14% | 6 | 60.00% |
Thomas Gleixner | 11 | 13.10% | 3 | 30.00% |
Peter Zijlstra | 4 | 4.76% | 1 | 10.00% |
Total | 84 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(getrawmonotonic64);
/**
* timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
*/
int timekeeping_valid_for_hres(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long seq;
int ret;
do {
seq = read_seqcount_begin(&tk_core.seq);
ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
} while (read_seqcount_retry(&tk_core.seq, seq));
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 52 | 80.00% | 2 | 28.57% |
Thomas Gleixner | 11 | 16.92% | 3 | 42.86% |
Li Zefan | 1 | 1.54% | 1 | 14.29% |
Peter Zijlstra | 1 | 1.54% | 1 | 14.29% |
Total | 65 | 100.00% | 7 | 100.00% |
/**
* timekeeping_max_deferment - Returns max time the clocksource can be deferred
*/
u64 timekeeping_max_deferment(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long seq;
u64 ret;
do {
seq = read_seqcount_begin(&tk_core.seq);
ret = tk->tkr_mono.clock->max_idle_ns;
} while (read_seqcount_retry(&tk_core.seq, seq));
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 40 | 63.49% | 2 | 28.57% |
Jon Hunter | 11 | 17.46% | 1 | 14.29% |
Thomas Gleixner | 11 | 17.46% | 3 | 42.86% |
Peter Zijlstra | 1 | 1.59% | 1 | 14.29% |
Total | 63 | 100.00% | 7 | 100.00% |
/**
* read_persistent_clock - Return time from the persistent clock.
*
* Weak dummy function for arches that do not yet support it.
* Reads the time from the battery backed persistent clock.
* Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
*
* XXX - Do be sure to remove it once all arches implement it.
*/
void __weak read_persistent_clock(struct timespec *ts)
{
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 14 | 60.87% | 1 | 33.33% |
Gideon Israel Dsouza | 5 | 21.74% | 1 | 33.33% |
John Stultz | 4 | 17.39% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
void __weak read_persistent_clock64(struct timespec64 *ts64)
{
struct timespec ts;
read_persistent_clock(&ts);
*ts64 = timespec_to_timespec64(ts);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Xunlei Pang | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/**
* read_boot_clock64 - Return time of the system start.
*
* Weak dummy function for arches that do not yet support it.
* Function to read the exact time the system has been started.
* Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
*
* XXX - Do be sure to remove it once all arches implement it.
*/
void __weak read_boot_clock64(struct timespec64 *ts)
{
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 16 | 69.57% | 1 | 33.33% |
Gideon Israel Dsouza | 5 | 21.74% | 1 | 33.33% |
Xunlei Pang | 2 | 8.70% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
/* Flag for if timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;
/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;
/*
* timekeeping_init - Initializes the clocksource and common timekeeping values
*/
void __init timekeeping_init(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct clocksource *clock;
unsigned long flags;
struct timespec64 now, boot, tmp;
read_persistent_clock64(&now);
if (!timespec64_valid_strict(&now)) {
pr_warn("WARNING: Persistent clock returned invalid value!\n"
" Check your CMOS/BIOS settings.\n");
now.tv_sec = 0;
now.tv_nsec = 0;
} else if (now.tv_sec || now.tv_nsec)
persistent_clock_exists = true;
read_boot_clock64(&boot);
if (!timespec64_valid_strict(&boot)) {
pr_warn("WARNING: Boot clock returned invalid value!\n"
" Check your CMOS/BIOS settings.\n");
boot.tv_sec = 0;
boot.tv_nsec = 0;
}
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
ntp_init();
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
tk_setup_internals(tk, clock);
tk_set_xtime(tk, &now);
tk->raw_sec = 0;
if (boot.tv_sec == 0 && boot.tv_nsec == 0)
boot = tk_xtime(tk);
set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
tk_set_wall_to_mono(tk, tmp);
timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 142 | 57.49% | 10 | 37.04% |
Martin Schwidefsky | 52 | 21.05% | 5 | 18.52% |
Thomas Gleixner | 32 | 12.96% | 7 | 25.93% |
Feng Tang | 14 | 5.67% | 1 | 3.70% |
Magnus Damm | 4 | 1.62% | 1 | 3.70% |
Xunlei Pang | 3 | 1.21% | 3 | 11.11% |
Total | 247 | 100.00% | 27 | 100.00% |
/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @delta: pointer to a timespec64 delta value
*
* Takes a timespec offset measuring a suspend interval and properly
* adds the sleep offset to the timekeeping variables.
*/
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
struct timespec64 *delta)
{
if (!timespec64_valid_strict(delta)) {
printk_deferred(KERN_WARNING
"__timekeeping_inject_sleeptime: Invalid "
"sleep delta value!\n");
return;
}
tk_xtime_add(tk, delta);
tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
tk_debug_account_sleep_time(delta);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 63 | 87.50% | 8 | 72.73% |
Colin Cross | 5 | 6.94% | 1 | 9.09% |
Thomas Gleixner | 4 | 5.56% | 2 | 18.18% |
Total | 72 | 100.00% | 11 | 100.00% |
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
* We have three kinds of time sources to use for sleep time
* injection; the preference order is:
* 1) non-stop clocksource
* 2) persistent clock (i.e. an RTC accessible when irqs are off)
* 3) RTC
*
* 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
* If the system has neither 1) nor 2), 3) is used as the last resort.
*
* If timekeeping has injected sleeptime via either 1) or 2),
* 3) becomes needless, so in this case we don't need to call
* rtc_resume(); this is what timekeeping_rtc_skipresume()
* means.
*/
bool timekeeping_rtc_skipresume(void)
{
return sleeptime_injected;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Xunlei Pang | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
/*
* Whether 1) can be used or not is only determined in
* timekeeping_resume(), which is invoked after rtc_suspend(),
* so we can't skip rtc_suspend() for sure if the system has 1).
*
* But if the system has 2), 2) will definitely be used, so in
* that case we don't need to call rtc_suspend(); this is what
* timekeeping_rtc_skipsuspend() indicates.
*/
bool timekeeping_rtc_skipsuspend(void)
{
return persistent_clock_exists;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Xunlei Pang | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
/**
* timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
* @delta: pointer to a timespec64 delta value
*
* This hook is for architectures that cannot support read_persistent_clock64
* because their RTC/persistent clock is only accessible when irqs are enabled,
* and that also lack an effective nonstop clocksource.
*
* This function should only be called by rtc_resume(), and allows
* a suspend offset to be injected into the timekeeping values.
*/
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
__timekeeping_inject_sleeptime(tk, delta);
timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 52 | 62.65% | 5 | 41.67% |
Thomas Gleixner | 23 | 27.71% | 4 | 33.33% |
David Vrabel | 5 | 6.02% | 2 | 16.67% |
Xunlei Pang | 3 | 3.61% | 1 | 8.33% |
Total | 83 | 100.00% | 12 | 100.00% |
#endif
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
*/
void timekeeping_resume(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct clocksource *clock = tk->tkr_mono.clock;
unsigned long flags;
struct timespec64 ts_new, ts_delta;
u64 cycle_now;
sleeptime_injected = false;
read_persistent_clock64(&ts_new);
clockevents_resume();
clocksource_resume();
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
/*
* After system resumes, we need to calculate the suspended time and
* compensate it for the OS time. There are 3 sources that could be
* used: Nonstop clocksource during suspend, persistent clock and rtc
* device.
*
* A given platform may have one, two, or all of them, and the
* preference will be:
* suspend-nonstop clocksource -> persistent clock -> rtc
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
cycle_now = tk_clock_read(&tk->tkr_mono);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
cycle_now > tk->tkr_mono.cycle_last) {
u64 nsec, cyc_delta;
cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
tk->tkr_mono.mask);
nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
ts_delta = ns_to_timespec64(nsec);
sleeptime_injected = true;
} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
sleeptime_injected = true;
}
if (sleeptime_injected)
__timekeeping_inject_sleeptime(tk, &ts_delta);
/* Re-base the last cycle value */
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
tk->ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
touch_softlockup_watchdog();
tick_resume();
hrtimers_resume();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Feng Tang | 79 | 30.86% | 1 | 2.86% |
Thomas Gleixner | 63 | 24.61% | 14 | 40.00% |
John Stultz | 63 | 24.61% | 8 | 22.86% |
Martin Schwidefsky | 20 | 7.81% | 2 | 5.71% |
Peter Zijlstra | 14 | 5.47% | 2 | 5.71% |
Xunlei Pang | 7 | 2.73% | 2 | 5.71% |
Rafael J. Wysocki | 5 | 1.95% | 2 | 5.71% |
David Vrabel | 3 | 1.17% | 2 | 5.71% |
Ingo Molnar | 1 | 0.39% | 1 | 2.86% |
Tomas Janousek | 1 | 0.39% | 1 | 2.86% |
Total | 256 | 100.00% | 35 | 100.00% |
int timekeeping_suspend(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
struct timespec64 delta, delta_delta;
static struct timespec64 old_delta;
read_persistent_clock64(&timekeeping_suspend_time);
/*
* On some systems the persistent clock cannot be detected at
* timekeeping_init() by its return value, so if we see a valid
* value returned here, update the persistent_clock_exists flag.
*/
if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
persistent_clock_exists = true;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
timekeeping_suspended = 1;
if (persistent_clock_exists) {
/*
* To avoid drift caused by repeated suspend/resumes,
* each of which can add ~1 second of drift error,
* try to compensate so the difference between system time
* and persistent_clock time stays close to constant.
*/
delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
delta_delta = timespec64_sub(delta, old_delta);
if (abs(delta_delta.tv_sec) >= 2) {
/*
* If delta_delta is too large, assume a time correction
* has occurred and set old_delta to the current delta.
*/
old_delta = delta;
} else {
/* Otherwise adjust the saved suspend time to compensate */
timekeeping_suspend_time =
timespec64_add(timekeeping_suspend_time, delta_delta);
}
}
timekeeping_update(tk, TK_MIRROR);
halt_fast_timekeeper(tk);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
tick_suspend();
clocksource_suspend();
clockevents_suspend();
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 110 | 62.15% | 9 | 36.00% |
Thomas Gleixner | 26 | 14.69% | 6 | 24.00% |
Zoran Markovic | 14 | 7.91% | 1 | 4.00% |
Xunlei Pang | 10 | 5.65% | 3 | 12.00% |
Rafael J. Wysocki | 9 | 5.08% | 3 | 12.00% |
Martin Schwidefsky | 5 | 2.82% | 2 | 8.00% |
Magnus Damm | 3 | 1.69% | 1 | 4.00% |
Total | 177 | 100.00% | 25 | 100.00% |
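The old_delta logic above is easiest to follow with numbers. read_persistent_clock64() typically has one-second granularity, so each suspend can misreport the suspend start by up to a second; keeping delta (system time minus persistent time) close to constant folds that rounding back out. A sketch of the compensation under invented, second-granularity values:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long old_delta = 0;	/* system - persistent, carried across suspends */
	/* Invented values: system time vs. a 1s-granularity persistent clock. */
	long xtime[] =   { 5000, 9000, 13000 };
	long persist[] = { 5000, 8999, 12999 };

	for (int i = 0; i < 3; i++) {
		long suspend_time = persist[i];
		long delta = xtime[i] - suspend_time;
		long delta_delta = delta - old_delta;

		if (labs(delta_delta) >= 2) {
			/* Assume the clock was stepped; re-baseline. */
			old_delta = delta;
		} else {
			/* Fold the sub-second rounding back in so it
			 * cannot accumulate across suspend cycles. */
			suspend_time += delta_delta;
		}
		printf("suspend %d: delta=%ld delta_delta=%ld adjusted=%ld\n",
		       i, delta, delta_delta, suspend_time);
	}
	return 0;
}

The second and third suspends each under-read by one second; the adjustment pushes the recorded suspend time back up so the one-second rounding error does not compound.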
/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
.resume = timekeeping_resume,
.suspend = timekeeping_suspend,
};
static int __init timekeeping_init_ops(void)
{
register_syscore_ops(&timekeeping_syscore_ops);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 14 | 77.78% | 1 | 50.00% |
Rafael J. Wysocki | 4 | 22.22% | 1 | 50.00% |
Total | 18 | 100.00% | 2 | 100.00% |
device_initcall(timekeeping_init_ops);
/*
* Apply a multiplier adjustment to the timekeeper
*/
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
s64 offset,
bool negative,
int adj_scale)
{
s64 interval = tk->cycle_interval;
s32 mult_adj = 1;
if (negative) {
mult_adj = -mult_adj;
interval = -interval;
offset = -offset;
}
mult_adj <<= adj_scale;
interval <<= adj_scale;
offset <<= adj_scale;
/*
* So the following can be confusing.
*
* To keep things simple, let's assume mult_adj == 1 for now.
*
* When mult_adj != 1, remember that the interval and offset values
* have been appropriately scaled so the math is the same.
*
* The basic idea here is that we're increasing the multiplier
* by one; this causes the xtime_interval to be incremented by
* one cycle_interval. This is because:
* xtime_interval = cycle_interval * mult
* So if mult is being incremented by one:
* xtime_interval = cycle_interval * (mult + 1)
* It's the same as:
* xtime_interval = (cycle_interval * mult) + cycle_interval
* Which can be shortened to:
* xtime_interval += cycle_interval
*
* So offset stores the non-accumulated cycles. Thus the current
* time (in shifted nanoseconds) is:
* now = (offset * adj) + xtime_nsec
* Now, even though we're adjusting the clock frequency, we have
* to keep time consistent. In other words, we can't jump back
* in time, and we also want to avoid jumping forward in time.
*
* So given the same offset value, we need the time to be the same
* both before and after the freq adjustment.
* now = (offset * adj_1) + xtime_nsec_1
* now = (offset * adj_2) + xtime_nsec_2
* So:
* (offset * adj_1) + xtime_nsec_1 =
* (offset * adj_2) + xtime_nsec_2
* And we know:
* adj_2 = adj_1 + 1
* So:
* (offset * adj_1) + xtime_nsec_1 =
* (offset * (adj_1+1)) + xtime_nsec_2
* (offset * adj_1) + xtime_nsec_1 =
* (offset * adj_1) + offset + xtime_nsec_2
* Canceling the sides:
* xtime_nsec_1 = offset + xtime_nsec_2
* Which gives us:
* xtime_nsec_2 = xtime_nsec_1 - offset
* Which simplifies to:
* xtime_nsec -= offset
*
* XXX - TODO: Doc ntp_error calculation.
*/
if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
/* NTP adjustment caused clocksource mult overflow */
WARN_ON_ONCE(1);
return;
}
tk->tkr_mono.mult += mult_adj;
tk->xtime_interval += interval;
tk->tkr_mono.xtime_nsec -= offset;
tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 109 | 82.58% | 4 | 66.67% |
Xunlei Pang | 20 | 15.15% | 1 | 16.67% |
Peter Zijlstra | 3 | 2.27% | 1 | 16.67% |
Total | 132 | 100.00% | 6 | 100.00% |
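The long comment above reduces to one invariant: (offset * mult) + xtime_nsec must read the same before and after the multiplier bump. A quick numeric check of that claim, with invented values:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t offset = 12345;	/* non-accumulated cycles */
	uint64_t mult = 1000;		/* current multiplier */
	uint64_t xtime_nsec = 999999;	/* shifted nanoseconds */
	uint64_t mult_adj = 1;		/* adjustment, pre-scaled as in the kernel */

	uint64_t now_before = offset * mult + xtime_nsec;

	/* The update performed by timekeeping_apply_adjustment(): */
	mult += mult_adj;
	xtime_nsec -= mult_adj * offset;

	uint64_t now_after = offset * mult + xtime_nsec;

	printf("now before bump: %" PRIu64 "\n", now_before);
	printf("now after bump:  %" PRIu64 "\n", now_after);
	return 0;
}

Both lines print 13344999: the extra offset * mult_adj contributed by the faster multiplier is exactly paid for out of xtime_nsec, so readers see no step in time.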
/*
* Calculate the multiplier adjustment needed to match the frequency
* specified by NTP
*/
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
s64 offset)
{
s64 interval = tk->cycle_interval;
s64 xinterval = tk->xtime_interval;
u32 base = tk->tkr_mono.clock->mult;
u32 max = tk->tkr_mono.clock->maxadj;
u32 cur_adj = tk->tkr_mono.mult;
s64 tick_error;
bool negative;
u32 adj_scale;
/* Remove any current error adj from freq calculation */
if (tk->ntp_err_mult)
xinterval -= tk->cycle_interval;
tk->ntp_tick = ntp_tick_length();
/* Calculate current error per tick */
tick_error = ntp_tick_length() >> tk->ntp_error_shift;
tick_error -= (xinterval + tk->xtime_remainder);
/* Don't worry about correcting it if it's small */
if (likely((tick_error >= 0) && (tick_error <= interval)))
return;
/* preserve the direction of correction */
negative = (tick_error < 0);
/* If any adjustment would pass the max, just return */
if (negative && (cur_adj - 1) <= (base - max))
return;
if (!negative && (cur_adj + 1) >= (base + max))
return;
/*
* Sort out the magnitude of the correction, but
* avoid making so large a correction that we go
* over the max adjustment.
*/
adj_scale = 0;
tick_error = abs(tick_error);
while (tick_error > interval) {
u32 adj = 1 << (adj_scale + 1);
/* Check if adjustment gets us within 1 unit from the max */
if (negative && (cur_adj - adj) <= (base - max))
break;
if (!negative && (cur_adj + adj) >= (base + max))
break;
adj_scale++;
tick_error >>= 1;
}
/* scale the corrections */
timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 257 | 99.23% | 5 | 71.43% |
Andrew Morton | 1 | 0.39% | 1 | 14.29% |
Martin Schwidefsky | 1 | 0.39% | 1 | 14.29% |
Total | 259 | 100.00% | 7 | 100.00% |
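The magnitude search above halves tick_error while doubling the candidate adjustment, so adj_scale lands near log2(error / interval) without crossing the clocksource's maxadj budget. A stand-alone sketch of just that loop, showing only the positive-error direction, with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t tick_error = 900;	/* absolute error per tick, shifted ns */
	int64_t interval = 100;		/* one cycle_interval's worth */
	uint32_t cur_adj = 1000;	/* current mult */
	uint32_t base = 1000;		/* clocksource's nominal mult */
	uint32_t max = 110;		/* clocksource's maxadj budget */
	uint32_t adj_scale = 0;

	while (tick_error > interval) {
		uint32_t adj = 1u << (adj_scale + 1);

		/* Stop if the next doubling would pass maxadj. */
		if (cur_adj + adj >= base + max)
			break;
		adj_scale++;
		tick_error >>= 1;
	}
	printf("apply mult_adj = %u (adj_scale = %u)\n",
	       1u << adj_scale, adj_scale);
	return 0;
}

With these numbers the loop settles on adj_scale = 4, i.e. a single correction of 16 multiplier units, well inside the budget of 110.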
/*
* Adjust the timekeeper's multiplier to the correct frequency
* and also to reduce the accumulated error value.
*/
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
/* Correct for the current frequency error */
timekeeping_freqadjust(tk, offset);
/* Next make a small adjustment to fix any cumulative error */
if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
tk->ntp_err_mult = 1;
timekeeping_apply_adjustment(tk, offset, 0, 0);
} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
/* Undo any existing error adjustment */
timekeeping_apply_adjustment(tk, offset, 1, 0);
tk->ntp_err_mult = 0;
}
if (unlikely(tk->tkr_mono.clock->maxadj &&
(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
> tk->tkr_mono.clock->maxadj))) {
printk_once(KERN_WARNING
"Adjusting %s more than 11%% (%ld vs %ld)\n",
tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
}
/*
* It may be possible that when we entered this function, xtime_nsec
* was very small. Further, if we're slightly speeding the clocksource
* in the code above, it's possible the required corrective factor to
* xtime_nsec could cause it to underflow.
*
* Now, since we already accumulated the second, we cannot simply roll
* the accumulated second back, since the NTP subsystem has been
* notified via second_overflow(). So instead we push xtime_nsec forward
* by the amount we underflowed, and add that amount into the error.
*
* We'll correct this error next time through this function, when
* xtime_nsec is not as small.
*/
if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
tk->tkr_mono.xtime_nsec = 0;
tk->ntp_error += neg << tk->ntp_error_shift;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 197 | 87.17% | 6 | 60.00% |
Thomas Gleixner | 11 | 4.87% | 1 | 10.00% |
Peter Zijlstra | 11 | 4.87% | 1 | 10.00% |
Xunlei Pang | 5 | 2.21% | 1 | 10.00% |
Ingo Molnar | 2 | 0.88% | 1 | 10.00% |
Total | 226 | 100.00% | 10 | 100.00% |
/**
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
* Helper function that accumulates the nsecs greater than a second
* from the xtime_nsec field into the xtime_sec field.
* It also calls into the NTP code to handle leapsecond processing.
*/
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
unsigned int clock_set = 0;
while (tk->tkr_mono.xtime_nsec >= nsecps) {
int leap;
tk->tkr_mono.xtime_nsec -= nsecps;
tk->xtime_sec++;
/* Figure out if it's a leap sec and apply if needed */
leap = second_overflow(tk->xtime_sec);
if (unlikely(leap)) {
struct timespec64 ts;
tk->xtime_sec += leap;
ts.tv_sec = leap;
ts.tv_nsec = 0;
tk_set_wall_to_mono(tk,
timespec64_sub(tk->wall_to_monotonic, ts));
__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
clock_set = TK_CLOCK_WAS_SET;
}
}
return clock_set;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 106 | 79.10% | 7 | 63.64% |
David Vrabel | 12 | 8.96% | 1 | 9.09% |
Martin Schwidefsky | 10 | 7.46% | 1 | 9.09% |
Peter Zijlstra | 3 | 2.24% | 1 | 9.09% |
Thomas Gleixner | 3 | 2.24% | 1 | 9.09% |
Total | 134 | 100.00% | 11 | 100.00% |
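The leap handling above preserves the same invariant as sleeptime injection: xtime_sec absorbs the leap while wall_to_monotonic is pulled the other way, so the monotonic sum never jumps. In numbers (all values invented):

#include <stdio.h>

int main(void)
{
	long xtime_sec = 1483228800;	/* wall seconds at a leap boundary */
	long wtm_sec = -1000;		/* wall_to_monotonic, seconds part */
	int leap = 1;			/* second_overflow() reported an insertion */

	long mono_before = xtime_sec + wtm_sec;

	/* What accumulate_nsecs_to_secs() does on a leap second: */
	xtime_sec += leap;
	wtm_sec -= leap;

	long mono_after = xtime_sec + wtm_sec;

	printf("monotonic: %ld -> %ld (unchanged across the leap)\n",
	       mono_before, mono_after);
	return 0;
}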
/**
* logarithmic_accumulation - shifted accumulation of cycles
* @tk:        pointer to the timekeeper to be updated
* @offset:    unaccumulated cycles
* @shift:     log2 of the number of cycle_intervals to accumulate
* @clock_set: set to TK_CLOCK_WAS_SET if a leap second was processed
*
* This function accumulates a shifted interval of cycles into
* a shifted interval of nanoseconds, allowing for an O(log)
* accumulation loop.
*
* Returns the unconsumed cycles.
*/
static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
u32 shift, unsigned int *clock_set)
{
u64 interval = tk->cycle_interval << shift;
u64 snsec_per_sec;
/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
return offset;
/* Accumulate one shifted interval */
offset -= interval;
tk->tkr_mono.cycle_last += interval;
tk->tkr_raw.cycle_last += interval;
tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
tk->tkr_raw.xtime_nsec -= snsec_per_sec;
tk->raw_sec++;
}
/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;
tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
(tk->ntp_error_shift + shift);
return offset;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 128 | 74.42% | 10 | 47.62% |
Thomas Gleixner | 16 | 9.30% | 3 | 14.29% |
Peter Zijlstra | 10 | 5.81% | 2 | 9.52% |
Roman Zippel | 5 | 2.91% | 1 | 4.76% |
Jason Wessel | 5 | 2.91% | 1 | 4.76% |
Kasper Pedersen | 4 | 2.33% | 1 | 4.76% |
Martin Schwidefsky | 3 | 1.74% | 2 | 9.52% |
Zhen Lei | 1 | 0.58% | 1 | 4.76% |
Total | 172 | 100.00% | 21 | 100.00% |
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
*/
void update_wall_time(void)
{
struct timekeeper *real_tk = &tk_core.timekeeper;
struct timekeeper *tk = &shadow_timekeeper;
u64 offset;
int shift = 0, maxshift;
unsigned int clock_set = 0;
unsigned long flags;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
goto out;
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif
/* Check if there's really nothing to do */
if (offset < real_tk->cycle_interval)
goto out;
/* Do some additional sanity checking */
timekeeping_check_update(tk, offset);
/*
* With NO_HZ we may have to accumulate many cycle_intervals
* (think "ticks") worth of time at once. To do this efficiently,
* we calculate the largest doubling multiple of cycle_intervals
* that is smaller than the offset. We then accumulate that
* chunk in one go, and then try to consume the next smaller
* doubled multiple.
*/
shift = ilog2(offset) - ilog2(tk->cycle_interval);
shift = max(0, shift);
/* Bound shift to one less than what overflows tick_length */
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
while (offset >= tk->cycle_interval) {
offset = logarithmic_accumulation(tk, offset, shift,
&clock_set);
if (offset < tk->cycle_interval << shift)
shift--;
}
/* correct the clock when NTP error is too big */
timekeeping_adjust(tk, offset);
/*
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
*/
clock_set |= accumulate_nsecs_to_secs(tk);
write_seqcount_begin(&tk_core.seq);
/*
* Update the real timekeeper.
*
* We could avoid this memcpy by switching pointers, but that
* requires changes to all other timekeeper usage sites as
* well, i.e. move the timekeeper pointer getter into the
* spinlocked/seqcount protected sections. And we trade this
* memcpy under the tk_core.seq against one before we start
* updating.
*/
timekeeping_update(tk, clock_set);
memcpy(real_tk, tk, sizeof(*tk));
/* The memcpy must come last. Do not put anything here! */
write_seqcount_end(&tk_core.seq);
out:
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
if (clock_set)
/* Have to call the _delayed version, since we're in irq context */
clock_was_set_delayed();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 215 | 75.97% | 19 | 55.88% |
Thomas Gleixner | 58 | 20.49% | 10 | 29.41% |
Peter Zijlstra | 3 | 1.06% | 1 | 2.94% |
David Vrabel | 3 | 1.06% | 1 | 2.94% |
Jim Cromie | 2 | 0.71% | 1 | 2.94% |
Stafford Horne | 1 | 0.35% | 1 | 2.94% |
Martin Schwidefsky | 1 | 0.35% | 1 | 2.94% |
Total | 283 | 100.00% | 34 | 100.00% |
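The accumulation strategy described in the comment above is easy to demonstrate in isolation: consume the largest power-of-two multiple of cycle_interval that fits, then fall back through smaller doublings, giving O(log n) steps instead of O(n). A sketch with invented numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cycle_interval = 1000;
	uint64_t offset = 37 * cycle_interval;	/* e.g. 37 pending "ticks" */
	int shift = 0;
	int steps = 0;

	/* Largest doubling multiple of cycle_interval within the offset. */
	while ((cycle_interval << (shift + 1)) <= offset)
		shift++;

	while (offset >= cycle_interval) {
		uint64_t chunk = cycle_interval << shift;

		if (offset >= chunk) {
			offset -= chunk;	/* accumulate 2^shift intervals at once */
			steps++;
		}
		if (offset < chunk)
			shift--;	/* try the next smaller doubling */
	}
	printf("37 intervals consumed in %d steps\n", steps);
	return 0;
}

37 pending intervals are consumed as 32 + 4 + 1, i.e. in 3 accumulation steps rather than 37.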
/**
* getboottime64 - Return the real time of system boot.
* @ts: pointer to the timespec64 to be set
*
* Returns the wall-time of boot in a timespec64.
*
* This is based on the wall_to_monotonic offset and the total suspend
* time. Calls to settimeofday will affect the value returned (which
* basically means that however wrong your real time clock is at boot time,
* you get the right time here).
*/
void getboottime64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
*ts = ktime_to_timespec64(t);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 14 | 33.33% | 2 | 28.57% |
John Stultz | 14 | 33.33% | 2 | 28.57% |
Tomas Janousek | 11 | 26.19% | 1 | 14.29% |
Hiroshi Shimamoto | 2 | 4.76% | 1 | 14.29% |
Martin Schwidefsky | 1 | 2.38% | 1 | 14.29% |
Total | 42 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(getboottime64);
unsigned long get_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return tk->xtime_sec;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 21 | 91.30% | 4 | 80.00% |
Thomas Gleixner | 2 | 8.70% | 1 | 20.00% |
Total | 23 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return timespec64_to_timespec(tk_xtime(tk));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 25 | 92.59% | 4 | 80.00% |
Thomas Gleixner | 2 | 7.41% | 1 | 20.00% |
Total | 27 | 100.00% | 5 | 100.00% |
struct timespec64 current_kernel_time64(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 now;
unsigned long seq;
do {
seq = read_seqcount_begin(&tk_core.seq);
now = tk_xtime(tk);
} while (read_seqcount_retry(&tk_core.seq, seq));
return now;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 50 | 80.65% | 4 | 57.14% |
Thomas Gleixner | 10 | 16.13% | 2 | 28.57% |
Baolin Wang | 2 | 3.23% | 1 | 14.29% |
Total | 62 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(current_kernel_time64);
struct timespec64 get_monotonic_coarse64(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 now, mono;
unsigned long seq;
do {
seq = read_seqcount_begin(&tk_core.seq);
now = tk_xtime(tk);
mono = tk->wall_to_monotonic;
} while (read_seqcount_retry(&tk_core.seq, seq));
set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
return now;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 82 | 89.13% | 5 | 71.43% |
Thomas Gleixner | 10 | 10.87% | 2 | 28.57% |
Total | 92 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(get_monotonic_coarse64);
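current_kernel_time64() and get_monotonic_coarse64() above both use the same lockless read pattern: snapshot the data under a sequence count and retry if a writer intervened. A deliberately simplified user-space sketch of the idea; the kernel's seqcount machinery additionally supplies memory barriers between the sequence reads and the data reads that this sketch glosses over:

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint seq;
static long tv_sec, tv_nsec;

static void writer_update(long s, long ns)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);	/* odd: write in progress */
	tv_sec = s;
	tv_nsec = ns;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);	/* even: write complete */
}

static void reader_snapshot(long *s, long *ns)
{
	unsigned int begin;

	do {
		do {	/* wait out an in-progress write (odd count) */
			begin = atomic_load_explicit(&seq, memory_order_acquire);
		} while (begin & 1);
		*s = tv_sec;
		*ns = tv_nsec;
		/* Retry if the writer ran while we were reading. */
	} while (atomic_load_explicit(&seq, memory_order_acquire) != begin);
}

int main(void)
{
	long s, ns;

	writer_update(100, 500000000L);
	reader_snapshot(&s, &ns);
	printf("snapshot: %ld.%09ld\n", s, ns);
	return 0;
}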
/*
* Must hold jiffies_lock
*/
void do_timer(unsigned long ticks)
{
jiffies_64 += ticks;
calc_global_load(ticks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Torben Hohn | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
/**
* ktime_get_update_offsets_now - hrtimer helper
* @cwsseq: pointer to check and store the clock was set sequence number
* @offs_real: pointer to storage for monotonic -> realtime offset
* @offs_boot: pointer to storage for monotonic -> boottime offset
* @offs_tai: pointer to storage for monotonic -> clock tai offset
*
* Returns current monotonic time and updates the offsets if the
* sequence number in @cwsseq and timekeeper.clock_was_set_seq are
* different.
*
* Called from hrtimer_interrupt() or retrigger_next_event()
*/
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
ktime_t *offs_boot, ktime_t *offs_tai)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
base = tk->tkr_mono.base;
nsecs = timekeeping_get_ns(&tk->tkr_mono);
base = ktime_add_ns(base, nsecs);
if (*cwsseq != tk->clock_was_set_seq) {
*cwsseq = tk->clock_was_set_seq;
*offs_real = tk->offs_real;
*offs_boot = tk->offs_boot;
*offs_tai = tk->offs_tai;
}
/* Handle leapsecond insertion adjustments */
if (unlikely(base >= tk->next_leap_ktime))
*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
} while (read_seqcount_retry(&tk_core.seq, seq));
return base;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 96 | 57.83% | 7 | 53.85% |
John Stultz | 67 | 40.36% | 5 | 38.46% |
Peter Zijlstra | 3 | 1.81% | 1 | 7.69% |
Total | 166 | 100.00% | 13 | 100.00% |
/**
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
static int timekeeping_validate_timex(struct timex *txc)
{
if (txc->modes & ADJ_ADJTIME) {
/* singleshot must not be used with any other mode bits */
if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
return -EINVAL;
if (!(txc->modes & ADJ_OFFSET_READONLY) &&
!capable(CAP_SYS_TIME))
return -EPERM;
} else {
/* In order to modify anything, you gotta be super-user! */
if (txc->modes && !capable(CAP_SYS_TIME))
return -EPERM;
/*
* if the quartz is off by more than 10% then
* something is VERY wrong!
*/
if (txc->modes & ADJ_TICK &&
(txc->tick < 900000/USER_HZ ||
txc->tick > 1100000/USER_HZ))
return -EINVAL;
}
if (txc->modes & ADJ_SETOFFSET) {
/* In order to inject time, you gotta be super-user! */
if (!capable(CAP_SYS_TIME))
return -EPERM;
/*
* Check whether the timespec/timeval used to inject a time
* offset is valid. Offsets can be positive or negative, so
* we don't check tv_sec. The value of the timeval/timespec
* is the sum of its fields, but *NOTE*:
* The field tv_usec/tv_nsec must always be non-negative and
* we can't have more nanoseconds/microseconds than a second.
*/
if (txc->time.tv_usec < 0)
return -EINVAL;
if (txc->modes & ADJ_NANO) {
if (txc->time.tv_usec >= NSEC_PER_SEC)
return -EINVAL;
} else {
if (txc->time.tv_usec >= USEC_PER_SEC)
return -EINVAL;
}
}
/*
* Check for potential multiplication overflows that can
* only happen on 64-bit systems:
*/
if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
if (LLONG_MIN / PPM_SCALE > txc->freq)
return -EINVAL;
if (LLONG_MAX / PPM_SCALE < txc->freq)
return -EINVAL;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnd Bergmann | 238 | 100.00% | 2 | 100.00% |
Total | 238 | 100.00% | 2 | 100.00% |
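These checks gate the user-facing adjtimex(2) path. A modes value of 0 requests a read-only query, requires no CAP_SYS_TIME, and passes every branch above; a small user-space sketch:

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

int main(void)
{
	struct timex txc;

	memset(&txc, 0, sizeof(txc));
	txc.modes = 0;	/* no ADJ_* bits: read-only, passes validation */

	int state = adjtimex(&txc);
	if (state < 0) {
		perror("adjtimex");
		return 1;
	}
	printf("clock state=%d freq=%ld offset=%ld\n",
	       state, txc.freq, txc.offset);
	return 0;
}

Setting any ADJ_* bit without CAP_SYS_TIME makes the same call fail with -EPERM, and an ADJ_SETOFFSET request with a negative or out-of-range tv_usec fails with -EINVAL, exactly as laid out in timekeeping_validate_timex().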
/**
* do_adjtimex() - Accessor function to NTP __do_adjtimex function
*/
int do_adjtimex(struct timex *txc)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
struct timespec64 ts;
s32 orig_tai, tai;
int ret;
/* Validate the data before disabling interrupts */
ret = timekeeping_validate_timex(txc);
if (ret)
return ret;
if (txc->modes & ADJ_SETOFFSET) {
struct timespec64 delta;
delta.tv_sec = txc->time.tv_sec;
delta.tv_nsec = txc->time.tv_usec;
if (!(txc->modes & ADJ_NANO))
delta.tv_nsec *= 1000;
ret = timekeeping_inject_offset(&delta);
if (ret)
return ret;
}
getnstimeofday64(&ts);
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
orig_tai = tai = tk->tai_offset;
ret = __do_adjtimex(txc, &ts, &tai);
if (tai != orig_tai) {
__timekeeping_set_tai_offset(tk, tai);
timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
}
tk_update_leap_state(tk);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
if (tai != orig_tai)
clock_was_set();
ntp_notify_cmos_timer();
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 204 | 92.73% | 12 | 75.00% |
Thomas Gleixner | 9 | 4.09% | 2 | 12.50% |
David Vrabel | 5 | 2.27% | 1 | 6.25% |
Arnd Bergmann | 2 | 0.91% | 1 | 6.25% |
Total | 220 | 100.00% | 16 | 100.00% |
#ifdef CONFIG_NTP_PPS
/**
* hardpps() - Accessor function to NTP __hardpps function
*/
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
unsigned long flags;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
__hardpps(phase_ts, raw_ts);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 52 | 86.67% | 2 | 50.00% |
Thomas Gleixner | 6 | 10.00% | 1 | 25.00% |
Arnd Bergmann | 2 | 3.33% | 1 | 25.00% |
Total | 60 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(hardpps);
#endif /* CONFIG_NTP_PPS */
/**
* xtime_update() - advances the timekeeping infrastructure
* @ticks: number of ticks, that have elapsed since the last call.
*
* Must be called with interrupts disabled.
*/
void xtime_update(unsigned long ticks)
{
write_seqlock(&jiffies_lock);
do_timer(ticks);
write_sequnlock(&jiffies_lock);
update_wall_time();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Torben Hohn | 24 | 82.76% | 1 | 33.33% |
John Stultz | 5 | 17.24% | 2 | 66.67% |
Total | 29 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Stultz | 3804 | 44.21% | 79 | 36.57% |
Thomas Gleixner | 1485 | 17.26% | 48 | 22.22% |
Christopher S. Hall | 1042 | 12.11% | 5 | 2.31% |
Martin Schwidefsky | 434 | 5.04% | 10 | 4.63% |
Arnd Bergmann | 315 | 3.66% | 4 | 1.85% |
Peter Zijlstra | 292 | 3.39% | 6 | 2.78% |
Heena Sirwani | 157 | 1.82% | 2 | 0.93% |
Marcelo Tosatti | 141 | 1.64% | 1 | 0.46% |
Xunlei Pang | 137 | 1.59% | 10 | 4.63% |
Feng Tang | 93 | 1.08% | 2 | 0.93% |
Prarit Bhargava | 91 | 1.06% | 1 | 0.46% |
Rafael J. Wysocki | 81 | 0.94% | 4 | 1.85% |
Harald Geyer | 77 | 0.89% | 1 | 0.46% |
David Vrabel | 71 | 0.83% | 2 | 0.93% |
Wang YanQing | 44 | 0.51% | 1 | 0.46% |
Torben Hohn | 43 | 0.50% | 2 | 0.93% |
Joel Fernandes | 37 | 0.43% | 1 | 0.46% |
Kees Cook | 35 | 0.41% | 1 | 0.46% |
Stephen Warren | 29 | 0.34% | 1 | 0.46% |
Magnus Damm | 24 | 0.28% | 2 | 0.93% |
Roman Zippel | 23 | 0.27% | 1 | 0.46% |
DengChao | 23 | 0.27% | 1 | 0.46% |
Kasper Pedersen | 16 | 0.19% | 1 | 0.46% |
Zoran Markovic | 14 | 0.16% | 1 | 0.46% |
Gideon Israel Dsouza | 13 | 0.15% | 1 | 0.46% |
Tomas Janousek | 12 | 0.14% | 1 | 0.46% |
Jon Hunter | 11 | 0.13% | 1 | 0.46% |
Ingo Molnar | 11 | 0.13% | 5 | 2.31% |
Colin Cross | 8 | 0.09% | 1 | 0.46% |
Grégor Boirie | 5 | 0.06% | 1 | 0.46% |
Jason Wessel | 5 | 0.06% | 1 | 0.46% |
Jason (Hui) Wang | 4 | 0.05% | 1 | 0.46% |
Baolin Wang | 3 | 0.03% | 1 | 0.46% |
Andreas Schwab | 3 | 0.03% | 1 | 0.46% |
Alexey Dobriyan | 3 | 0.03% | 1 | 0.46% |
Jim Cromie | 2 | 0.02% | 1 | 0.46% |
Hiroshi Shimamoto | 2 | 0.02% | 1 | 0.46% |
Li Zefan | 2 | 0.02% | 1 | 0.46% |
Zhen Lei | 2 | 0.02% | 1 | 0.46% |
Yijing Wang | 1 | 0.01% | 1 | 0.46% |
Robert P. J. Day | 1 | 0.01% | 1 | 0.46% |
Stafford Horne | 1 | 0.01% | 1 | 0.46% |
Richard Cochran | 1 | 0.01% | 1 | 0.46% |
Andrew Morton | 1 | 0.01% | 1 | 0.46% |
Stephen Boyd | 1 | 0.01% | 1 | 0.46% |
H Hartley Sweeten | 1 | 0.01% | 1 | 0.46% |
Masanari Iida | 1 | 0.01% | 1 | 0.46% |
Fengguang Wu | 1 | 0.01% | 1 | 0.46% |
Geert Uytterhoeven | 1 | 0.01% | 1 | 0.46% |
Total | 8604 | 100.00% | 216 | 100.00% |