Release 4.14 arch/alpha/kernel/time.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the clocksource time handling.
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09	Adrian Sun
 *		use interval timer if CONFIG_RTC=y
 * 1997-10-29	John Bowman (bowman@math.ualberta.ca)
 *		fixed tick loss calculation in timer_interrupt
 *		(round system clock to nearest tick instead of truncating)
 *		fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16	Thorsten Kranzkowski (dl8bcu@gmx.net)
 *		fixed algorithm in do_gettimeofday() for calculating the precise time
 *		from processor cycle counter (now taking lost_ticks into account)
 * 2003-06-03	R. Scott Bailey <scott.bailey@eds.com>
 *		Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include "proto.h"
#include "irq_impl.h"
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
unsigned long est_cycle_freq;
#ifdef CONFIG_IRQ_WORK
DEFINE_PER_CPU(u8, irq_work_pending);
#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
void arch_irq_work_raise(void)
{
	set_irq_work_pending_flag();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michael Cree | 8 | 80.00% | 1 | 33.33% |
Peter Zijlstra | 2 | 20.00% | 2 | 66.67% |
Total | 10 | 100.00% | 3 | 100.00% |
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0
#define clear_irq_work_pending()
#endif /* CONFIG_IRQ_WORK */
static inline __u32 rpcc(void)
{
	return __builtin_alpha_rpcc();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 11 | 84.62% | 1 | 50.00% |
Richard Henderson | 2 | 15.38% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
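Note that rpcc() exposes only the low 32 bits of the processor cycle counter, so elapsed-cycle measurements should be taken as an unsigned 32-bit difference, which remains correct across a single counter wraparound. A minimal sketch (illustrative only; do_work() is a hypothetical stand-in for the code being timed):

	__u32 start = rpcc();
	do_work();
	__u32 elapsed = rpcc() - start;	/* unsigned delta is wrap-safe */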
/*
* The RTC as a clock_event_device primitive.
*/
static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
irqreturn_t
rtc_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	/* Don't run the hook for UNUSED or SHUTDOWN. */
	if (likely(clockevent_state_periodic(ce)))
		ce->event_handler(ce);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	return IRQ_HANDLED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 30 | 46.15% | 2 | 15.38% |
Linus Torvalds (pre-git) | 16 | 24.62% | 6 | 46.15% |
Michael Cree | 10 | 15.38% | 1 | 7.69% |
Peter Zijlstra | 5 | 7.69% | 2 | 15.38% |
Viresh Kumar | 3 | 4.62% | 1 | 7.69% |
Marc Zyngier | 1 | 1.54% | 1 | 7.69% |
Total | 65 | 100.00% | 13 | 100.00% |
static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	/* This hook is for oneshot mode, which we don't support. */
	return -EINVAL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static void __init
init_rtc_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "rtc",
		.features = CLOCK_EVT_FEAT_PERIODIC,
		.rating = 100,
		.cpumask = cpumask_of(cpu),
		.set_next_event = rtc_ce_set_next_event,
	};

	clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 70 | 92.11% | 2 | 50.00% |
Peter Zijlstra | 5 | 6.58% | 1 | 25.00% |
Linus Torvalds (pre-git) | 1 | 1.32% | 1 | 25.00% |
Total | 76 | 100.00% | 4 | 100.00% |
/*
* The QEMU clock as a clocksource primitive.
*/
static u64
qemu_cs_read(struct clocksource *cs)
{
	return qemu_get_vmtime();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 14 | 93.33% | 1 | 50.00% |
Thomas Gleixner | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
static struct clocksource qemu_cs = {
	.name = "qemu",
	.rating = 400,
	.read = qemu_cs_read,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.max_idle_ns = LONG_MAX
};
/*
* The QEMU alarm as a clock_event_device primitive.
*/
static int qemu_ce_shutdown(struct clock_event_device *ce)
{
	/* The mode member of CE is updated for us in generic code.
	   Just make sure that the event is disabled. */
	qemu_set_alarm_abs(0);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 15 | 75.00% | 1 | 50.00% |
Viresh Kumar | 5 | 25.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
static int
qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	qemu_set_alarm_rel(evt);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static irqreturn_t
qemu_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	ce->event_handler(ce);
	return IRQ_HANDLED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
static void __init
init_qemu_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "qemu",
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 400,
		.cpumask = cpumask_of(cpu),
		.set_state_shutdown = qemu_ce_shutdown,
		.set_state_oneshot = qemu_ce_shutdown,
		.tick_resume = qemu_ce_shutdown,
		.set_next_event = qemu_ce_set_next_event,
	};

	clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 79 | 86.81% | 1 | 50.00% |
Viresh Kumar | 12 | 13.19% | 1 | 50.00% |
Total | 91 | 100.00% | 2 | 100.00% |
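Since the device is registered with a frequency of NSEC_PER_SEC, one event-device tick equals one nanosecond, and the evt argument to qemu_ce_set_next_event() is a relative delay in nanoseconds. As a hedged illustration (not code from this file), arming a 1 ms one-shot event would be:

	/* Illustrative only: 1000000 ns = 1 ms when ticks are nanoseconds. */
	qemu_ce_set_next_event(1000000, ce);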
void __init
common_init_rtc(void)
{
	unsigned char x, sel = 0;

	/* Reset periodic interrupt frequency. */
#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
	/* Test includes known working values on various platforms
	   where 0x26 is wrong; we refuse to change those. */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		sel = RTC_REF_CLCK_32KHZ + 6;
	}
#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
	sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
#else
# error "Unknown HZ from arch/alpha/Kconfig"
#endif
	if (sel) {
		printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
		       CONFIG_HZ, sel);
		CMOS_WRITE(sel, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts. */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	(void) CMOS_READ(RTC_INTR_FLAGS);

	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	init_rtc_irq();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 147 | 67.12% | 6 | 66.67% |
Richard Henderson | 71 | 32.42% | 2 | 22.22% |
Sam Ravnborg | 1 | 0.46% | 1 | 11.11% |
Total | 219 | 100.00% | 9 | 100.00% |
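In the low-HZ branch, the MC146818 periodic interrupt rate is 32768 >> (divider - 1), where the divider occupies the low four bits of RTC_FREQ_SELECT, so RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ) selects exactly CONFIG_HZ. A worked sketch (the helper is hypothetical, added for illustration):

	/* CONFIG_HZ = 256: 32768/256 = 128, __builtin_ffs(128) = 8,
	   and 32768 >> (8 - 1) = 256 Hz. The explicit "+ 6" used for
	   HZ == 1024 agrees: 32768 >> (6 - 1) = 1024 Hz. */
	static unsigned int rtc_periodic_rate(unsigned int divider)
	{
		return 32768 >> (divider - 1);
	}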
#ifndef CONFIG_ALPHA_WTINT
/*
* The RPCC as a clocksource primitive.
*
* While we have free-running timecounters running on all CPUs, and we make
* a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
* with the wall clock, that initialization isn't kept up-to-date across
* different time counters in SMP mode. Therefore we can only use this
* method when there's only one CPU enabled.
*
* When using the WTINT PALcall, the RPCC may shift to a lower frequency,
* or stop altogether, while waiting for the interrupt. Therefore we cannot
* use this method when WTINT is in use.
*/
static u64 read_rpcc(struct clocksource *cs)
{
	return rpcc();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 14 | 93.33% | 1 | 50.00% |
Thomas Gleixner | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
static struct clocksource clocksource_rpcc = {
	.name = "rpcc",
	.rating = 300,
	.read = read_rpcc,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS
};
#endif /* ALPHA_WTINT */
/* Validate a computed cycle counter result against the known bounds for
the given processor core. There's too much brokenness in the way of
timing hardware for any one method to work everywhere. :-(
Return 0 if the result cannot be trusted, otherwise return the argument. */
static unsigned long __init
validate_cc_value(unsigned long cc)
{
	static struct bounds {
		unsigned int min, max;
	} cpu_hz[] __initdata = {
		[EV3_CPU]    = {   50000000,  200000000 },	/* guess */
		[EV4_CPU]    = {  100000000,  300000000 },
		[LCA4_CPU]   = {  100000000,  300000000 },	/* guess */
		[EV45_CPU]   = {  200000000,  300000000 },
		[EV5_CPU]    = {  250000000,  433000000 },
		[EV56_CPU]   = {  333000000,  667000000 },
		[PCA56_CPU]  = {  400000000,  600000000 },	/* guess */
		[PCA57_CPU]  = {  500000000,  600000000 },	/* guess */
		[EV6_CPU]    = {  466000000,  600000000 },
		[EV67_CPU]   = {  600000000,  750000000 },
		[EV68AL_CPU] = {  750000000,  940000000 },
		[EV68CB_CPU] = { 1000000000, 1333333333 },
		/* None of the following are shipping as of 2001-11-01. */
		[EV68CX_CPU] = { 1000000000, 1700000000 },	/* guess */
		[EV69_CPU]   = { 1000000000, 1700000000 },	/* guess */
		[EV7_CPU]    = {  800000000, 1400000000 },	/* guess */
		[EV79_CPU]   = { 1000000000, 2000000000 },	/* guess */
	};

	/* Allow for some drift in the crystal. 10MHz is more than enough. */
	const unsigned int deviation = 10000000;

	struct percpu_struct *cpu;
	unsigned int index;

	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	index = cpu->type & 0xffffffff;

	/* If index out of bounds, no way to validate. */
	if (index >= ARRAY_SIZE(cpu_hz))
		return cc;

	/* If index contains no data, no way to validate. */
	if (cpu_hz[index].max == 0)
		return cc;

	if (cc < cpu_hz[index].min - deviation
	    || cc > cpu_hz[index].max + deviation)
		return 0;

	return cc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 298 | 99.00% | 2 | 50.00% |
Ivan Kokshaysky | 2 | 0.66% | 1 | 25.00% |
Tobias Klauser | 1 | 0.33% | 1 | 25.00% |
Total | 301 | 100.00% | 4 | 100.00% |
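As a worked example of the bounds check (frequencies assumed for illustration): on an EV56, whose table entry is 333..667 MHz, a calibrated reading of 600 MHz lies within [333 MHz - 10 MHz, 667 MHz + 10 MHz] and is returned unchanged, while 900 MHz would be rejected:

	unsigned long ok  = validate_cc_value(600000000);	/* returns 600000000 */
	unsigned long bad = validate_cc_value(900000000);	/* returns 0 */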
/*
* Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
* arch/i386/time.c.
*/
#define CALIBRATE_LATCH 0xffff
#define TIMEOUT_COUNT 0x100000
static unsigned long __init
calibrate_cc_with_pit(void)
{
	int cc, count = 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Now let's take care of CTC channel 2
	 *
	 * Set the Gate high, program CTC channel 2 for mode 0,
	 * (interrupt on terminal count mode), binary count,
	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
	outb(CALIBRATE_LATCH >> 8, 0x42);	/* MSB of count */

	cc = rpcc();
	do {
		count++;
	} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
	cc = rpcc() - cc;

	/* Error: ECTCNEVERSET or ECPUTOOFAST. */
	if (count <= 1 || count == TIMEOUT_COUNT)
		return 0;

	return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 86 | 66.15% | 2 | 18.18% |
Linus Torvalds (pre-git) | 26 | 20.00% | 7 | 63.64% |
Ivan Kokshaysky | 16 | 12.31% | 1 | 9.09% |
Andrew Morton | 2 | 1.54% | 1 | 9.09% |
Total | 130 | 100.00% | 11 | 100.00% |
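The return expression converts cycles into a frequency: channel 2 counts down CALIBRATE_LATCH + 1 = 65536 ticks at PIT_TICK_RATE (1193182 Hz), an interval of about 54.9 ms, so the measured cycle count divided by that interval gives Hz. A worked sketch with assumed numbers:

	/* ~27480000 cycles over 65536 / 1193182 s (~54.9 ms) is ~500 MHz. */
	long cc = 27480000;
	unsigned long hz = ((long)cc * 1193182) / 65536;	/* ~500 MHz */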
/* The Linux interpretation of the CMOS clock register contents:
When the Update-In-Progress (UIP) flag goes from 1 to 0, the
RTC registers show the second which has precisely just started.
Let's hope other operating systems interpret the RTC the same way. */
static unsigned long __init
rpcc_after_update_in_progress(void)
{
	do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
	do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

	return rpcc();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 28 | 68.29% | 3 | 75.00% |
Linus Torvalds | 13 | 31.71% | 1 | 25.00% |
Total | 41 | 100.00% | 4 | 100.00% |
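Each return from rpcc_after_update_in_progress() marks the start of a fresh RTC second, so the cycle-counter delta between two consecutive calls approximates the CPU frequency in Hz, to within the RTC's accuracy. This is exactly the fallback calibration used by time_init() below:

	unsigned int c0 = rpcc_after_update_in_progress();
	unsigned int c1 = rpcc_after_update_in_progress();	/* one RTC second later */
	unsigned long hz = c1 - c0;	/* ~cycles per second */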
void __init
time_init(void)
{
	unsigned int cc1, cc2;
	unsigned long cycle_freq, tolerance;
	long diff;

	if (alpha_using_qemu) {
		clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
		init_qemu_clockevent();

		timer_irqaction.handler = qemu_timer_interrupt;
		init_rtc_irq();
		return;
	}

	/* Calibrate CPU clock -- attempt #1. */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	cc1 = rpcc();

	/* Calibrate CPU clock -- attempt #2. */
	if (!est_cycle_freq) {
		cc1 = rpcc_after_update_in_progress();
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it. Otherwise, use what we found. */
		tolerance = cycle_freq / 4000;
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus. "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			est_cycle_freq = 0;
		}
	} else if (!validate_cc_value(cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* See above for restrictions on using clocksource_rpcc. */
#ifndef CONFIG_ALPHA_WTINT
	if (hwrpb->nr_processors == 1)
		clocksource_register_hz(&clocksource_rpcc, cycle_freq);
#endif

	/* Startup the timer source. */
	alpha_mv.init_rtc();
	init_rtc_clockevent();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 74 | 34.42% | 2 | 15.38% |
Linus Torvalds (pre-git) | 72 | 33.49% | 5 | 38.46% |
Richard Henderson | 62 | 28.84% | 4 | 30.77% |
Matt Mackall | 6 | 2.79% | 1 | 7.69% |
Andrew Morton | 1 | 0.47% | 1 | 7.69% |
Total | 215 | 100.00% | 13 | 100.00% |
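The tolerance computation encodes the 250 PPM bound noted in the 2003 changelog entry above: 250 parts per million is 250/1000000 = 1/4000, hence cycle_freq / 4000. With an assumed 500 MHz part, the HWRPB value may differ from the estimate by at most 125 kHz before it is declared bogus:

	unsigned long cycle_freq = 500000000;	/* assumed for illustration */
	unsigned long tolerance = cycle_freq / 4000;	/* 125000 Hz = 250 PPM */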
/* Initialize the clock_event_device for secondary cpus. */
#ifdef CONFIG_SMP
void __init
init_clockevent(void)
{
	if (alpha_using_qemu)
		init_qemu_clockevent();
	else
		init_rtc_clockevent();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 18 | 94.74% | 2 | 66.67% |
Linus Torvalds (pre-git) | 1 | 5.26% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Henderson | 564 | 36.22% | 9 | 16.36% |
Linus Torvalds | 480 | 30.83% | 4 | 7.27% |
Linus Torvalds (pre-git) | 360 | 23.12% | 19 | 34.55% |
Michael Cree | 48 | 3.08% | 2 | 3.64% |
Peter Zijlstra | 22 | 1.41% | 3 | 5.45% |
Ivan Kokshaysky | 21 | 1.35% | 2 | 3.64% |
Viresh Kumar | 20 | 1.28% | 1 | 1.82% |
Andrew Morton | 6 | 0.39% | 2 | 3.64% |
Matt Mackall | 6 | 0.39% | 1 | 1.82% |
Thomas Gleixner | 6 | 0.39% | 2 | 3.64% |
Al Viro | 5 | 0.32% | 1 | 1.82% |
Stephen Hemminger | 3 | 0.19% | 1 | 1.82% |
William Lee Irwin III | 3 | 0.19% | 1 | 1.82% |
Arnaldo Carvalho de Melo | 3 | 0.19% | 1 | 1.82% |
John Stultz | 3 | 0.19% | 1 | 1.82% |
Christoph Lameter | 3 | 0.19% | 1 | 1.82% |
Tobias Klauser | 1 | 0.06% | 1 | 1.82% |
Greg Kroah-Hartman | 1 | 0.06% | 1 | 1.82% |
Sam Ravnborg | 1 | 0.06% | 1 | 1.82% |
Marc Zyngier | 1 | 0.06% | 1 | 1.82% |
Total | 1557 | 100.00% | 55 | 100.00% |