
Release 4.14: arch/alpha/kernel/time.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the clocksource time handling.
 * 1997-09-10   Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09    Adrian Sun
 *      use interval timer if CONFIG_RTC=y
 * 1997-10-29    John Bowman (bowman@math.ualberta.ca)
 *      fixed tick loss calculation in timer_interrupt
 *      (round system clock to nearest tick instead of truncating)
 *      fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16   Thorsten Kranzkowski (dl8bcu@gmx.net)
 *      fixed algorithm in do_gettimeofday() for calculating the precise time
 *      from processor cycle counter (now taking lost_ticks into account)
 * 2003-06-03   R. Scott Bailey <scott.bailey@eds.com>
 *      Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include "proto.h"
#include "irq_impl.h"


DEFINE_SPINLOCK(rtc_lock);

EXPORT_SYMBOL(rtc_lock);


unsigned long est_cycle_freq;

#ifdef CONFIG_IRQ_WORK

DEFINE_PER_CPU(u8, irq_work_pending);


#define set_irq_work_pending_flag()  __this_cpu_write(irq_work_pending, 1)

#define test_irq_work_pending()      __this_cpu_read(irq_work_pending)

#define clear_irq_work_pending()     __this_cpu_write(irq_work_pending, 0)


void arch_irq_work_raise(void)
{
        set_irq_work_pending_flag();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Michael Cree                    8   80.00%        1      33.33%
Peter Zijlstra                  2   20.00%        2      66.67%
Total                          10  100.00%        3     100.00%

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()      0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */
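
The per-CPU flag machinery above is easier to see end to end outside the kernel. Below is a minimal user-space sketch of the same test-clear-run pattern; the fake_* names and the single shared flag are illustrative stand-ins (the kernel version keeps one flag per CPU via DEFINE_PER_CPU and runs in interrupt context):

#include <stdio.h>

/* Single stand-in for the kernel's per-CPU flag. */
static volatile unsigned char irq_work_pending_flag;

/* Mirrors arch_irq_work_raise(): just mark work as pending. */
static void fake_arch_irq_work_raise(void)
{
        irq_work_pending_flag = 1;
}

/* Stand-in for irq_work_run(): the deferred work itself. */
static void fake_irq_work_run(void)
{
        printf("deferred work runs in timer-interrupt context\n");
}

/* Mirrors the tail of rtc_timer_interrupt(): test, clear, run. */
static void fake_timer_interrupt(void)
{
        if (irq_work_pending_flag) {
                irq_work_pending_flag = 0;
                fake_irq_work_run();
        }
}

int main(void)
{
        fake_timer_interrupt();         /* nothing pending: no output */
        fake_arch_irq_work_raise();     /* some context queues work */
        fake_timer_interrupt();         /* next tick notices and runs it */
        return 0;
}
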
static inline __u32 rpcc(void)
{
        return __builtin_alpha_rpcc();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)       11   84.62%        1      50.00%
Richard Henderson               2   15.38%        1      50.00%
Total                          13  100.00%        2     100.00%
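
rpcc() returns only the low 32 bits of the processor cycle counter, so elapsed cycles are always formed by unsigned subtraction, which stays correct across a single counter wraparound. A small host-side sketch with made-up sample values:

#include <stdio.h>

typedef unsigned int u32;

/* Elapsed cycles between two 32-bit counter reads; unsigned
   subtraction remains correct across a single wraparound. */
static u32 cycle_delta(u32 start, u32 end)
{
        return end - start;
}

int main(void)
{
        /* Hypothetical samples straddling the 2^32 wrap point. */
        u32 before = 0xfffffff0u;
        u32 after  = 0x00000010u;

        printf("elapsed cycles: %u\n", cycle_delta(before, after)); /* 32 */
        return 0;
}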

/*
 * The RTC as a clock_event_device primitive.
 */

static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
irqreturn_t
rtc_timer_interrupt(int irq, void *dev)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        /* Don't run the hook for UNUSED or SHUTDOWN.  */
        if (likely(clockevent_state_periodic(ce)))
                ce->event_handler(ce);

        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }

        return IRQ_HANDLED;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              30   46.15%        2      15.38%
Linus Torvalds (pre-git)       16   24.62%        6      46.15%
Michael Cree                   10   15.38%        1       7.69%
Peter Zijlstra                  5    7.69%        2      15.38%
Viresh Kumar                    3    4.62%        1       7.69%
Marc Zyngier                    1    1.54%        1       7.69%
Total                          65  100.00%       13     100.00%


static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
        /* This hook is for oneshot mode, which we don't support.  */
        return -EINVAL;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              20  100.00%        1     100.00%
Total                          20  100.00%        1     100.00%


static void __init
init_rtc_clockevent(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        *ce = (struct clock_event_device){
                .name = "rtc",
                .features = CLOCK_EVT_FEAT_PERIODIC,
                .rating = 100,
                .cpumask = cpumask_of(cpu),
                .set_next_event = rtc_ce_set_next_event,
        };

        clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              70   92.11%        2      50.00%
Peter Zijlstra                  5    6.58%        1      25.00%
Linus Torvalds (pre-git)        1    1.32%        1      25.00%
Total                          76  100.00%        4     100.00%

/*
 * The QEMU clock as a clocksource primitive.
 */
static u64
qemu_cs_read(struct clocksource *cs)
{
        return qemu_get_vmtime();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              14   93.33%        1      50.00%
Thomas Gleixner                 1    6.67%        1      50.00%
Total                          15  100.00%        2     100.00%

static struct clocksource qemu_cs = {
        .name           = "qemu",
        .rating         = 400,
        .read           = qemu_cs_read,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .max_idle_ns    = LONG_MAX
};

/*
 * The QEMU alarm as a clock_event_device primitive.
 */
static int qemu_ce_shutdown(struct clock_event_device *ce)
{
        /* The mode member of CE is updated for us in generic code.
           Just make sure that the event is disabled.  */
        qemu_set_alarm_abs(0);
        return 0;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              15   75.00%        1      50.00%
Viresh Kumar                    5   25.00%        1      50.00%
Total                          20  100.00%        2     100.00%


static int
qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
        qemu_set_alarm_rel(evt);
        return 0;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              23  100.00%        1     100.00%
Total                          23  100.00%        1     100.00%


static irqreturn_t
qemu_timer_interrupt(int irq, void *dev)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        ce->event_handler(ce);
        return IRQ_HANDLED;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              42  100.00%        1     100.00%
Total                          42  100.00%        1     100.00%


static void __init
init_qemu_clockevent(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        *ce = (struct clock_event_device){
                .name = "qemu",
                .features = CLOCK_EVT_FEAT_ONESHOT,
                .rating = 400,
                .cpumask = cpumask_of(cpu),
                .set_state_shutdown = qemu_ce_shutdown,
                .set_state_oneshot = qemu_ce_shutdown,
                .tick_resume = qemu_ce_shutdown,
                .set_next_event = qemu_ce_set_next_event,
        };

        clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              79   86.81%        1      50.00%
Viresh Kumar                   12   13.19%        1      50.00%
Total                          91  100.00%        2     100.00%


void __init
common_init_rtc(void)
{
        unsigned char x, sel = 0;

        /* Reset periodic interrupt frequency.  */
#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
        x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
        /* Test includes known working values on various platforms
           where 0x26 is wrong; we refuse to change those. */
        if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
                sel = RTC_REF_CLCK_32KHZ + 6;
        }
#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
        sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
#else
# error "Unknown HZ from arch/alpha/Kconfig"
#endif
        if (sel) {
                printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
                       CONFIG_HZ, sel);
                CMOS_WRITE(sel, RTC_FREQ_SELECT);
        }

        /* Turn on periodic interrupts.  */
        x = CMOS_READ(RTC_CONTROL);
        if (!(x & RTC_PIE)) {
                printk("Turning on RTC interrupts.\n");
                x |= RTC_PIE;
                x &= ~(RTC_AIE | RTC_UIE);
                CMOS_WRITE(x, RTC_CONTROL);
        }
        (void) CMOS_READ(RTC_INTR_FLAGS);

        outb(0x36, 0x43);       /* pit counter 0: system timer */
        outb(0x00, 0x40);
        outb(0x00, 0x40);

        outb(0xb6, 0x43);       /* pit counter 2: speaker */
        outb(0x31, 0x42);
        outb(0x13, 0x42);

        init_rtc_irq();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)      147   67.12%        6      66.67%
Richard Henderson              71   32.42%        2      22.22%
Sam Ravnborg                    1    0.46%        1      11.11%
Total                         219  100.00%        9     100.00%
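
For the lower HZ values, common_init_rtc() above derives the RTC rate-select code as RTC_REF_CLCK_32KHZ + ffs(32768 / HZ). The host-side sketch below checks that this recovers exactly CONFIG_HZ; it assumes the MC146818 behavior that rate-select code n yields a periodic rate of 32768 >> (n - 1) Hz:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        /* The HZ values accepted by the #elif branch above. */
        int hz_values[] = { 256, 128, 64, 32 };

        for (int i = 0; i < 4; i++) {
                int hz = hz_values[i];
                int rs = ffs(32768 / hz);       /* rate-select code */
                /* MC146818: periodic rate = 32768 >> (rs - 1) Hz. */
                printf("HZ=%3d -> rate select %2d -> %5d Hz\n",
                       hz, rs, 32768 >> (rs - 1));
        }
        return 0;
}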

#ifndef CONFIG_ALPHA_WTINT
/*
 * The RPCC as a clocksource primitive.
 *
 * While we have free-running timecounters running on all CPUs, and we make
 * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
 * with the wall clock, that initialization isn't kept up-to-date across
 * different time counters in SMP mode.  Therefore we can only use this
 * method when there's only one CPU enabled.
 *
 * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
 * or stop altogether, while waiting for the interrupt.  Therefore we cannot
 * use this method when WTINT is in use.
 */
static u64 read_rpcc(struct clocksource *cs)
{
        return rpcc();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              14   93.33%        1      50.00%
Thomas Gleixner                 1    6.67%        1      50.00%
Total                          15  100.00%        2     100.00%

static struct clocksource clocksource_rpcc = {
        .name           = "rpcc",
        .rating         = 300,
        .read           = read_rpcc,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS
};
#endif /* ALPHA_WTINT */

/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the argument.  */
static unsigned long __init
validate_cc_value(unsigned long cc)
{
        static struct bounds {
                unsigned int min, max;
        } cpu_hz[] __initdata = {
                [EV3_CPU]    = {   50000000,  200000000 },      /* guess */
                [EV4_CPU]    = {  100000000,  300000000 },
                [LCA4_CPU]   = {  100000000,  300000000 },      /* guess */
                [EV45_CPU]   = {  200000000,  300000000 },
                [EV5_CPU]    = {  250000000,  433000000 },
                [EV56_CPU]   = {  333000000,  667000000 },
                [PCA56_CPU]  = {  400000000,  600000000 },      /* guess */
                [PCA57_CPU]  = {  500000000,  600000000 },      /* guess */
                [EV6_CPU]    = {  466000000,  600000000 },
                [EV67_CPU]   = {  600000000,  750000000 },
                [EV68AL_CPU] = {  750000000,  940000000 },
                [EV68CB_CPU] = { 1000000000, 1333333333 },
                /* None of the following are shipping as of 2001-11-01.  */
                [EV68CX_CPU] = { 1000000000, 1700000000 },      /* guess */
                [EV69_CPU]   = { 1000000000, 1700000000 },      /* guess */
                [EV7_CPU]    = {  800000000, 1400000000 },      /* guess */
                [EV79_CPU]   = { 1000000000, 2000000000 },      /* guess */
        };

        /* Allow for some drift in the crystal.  10MHz is more than enough.  */
        const unsigned int deviation = 10000000;

        struct percpu_struct *cpu;
        unsigned int index;

        cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
        index = cpu->type & 0xffffffff;

        /* If index out of bounds, no way to validate.  */
        if (index >= ARRAY_SIZE(cpu_hz))
                return cc;

        /* If index contains no data, no way to validate.  */
        if (cpu_hz[index].max == 0)
                return cc;

        if (cc < cpu_hz[index].min - deviation
            || cc > cpu_hz[index].max + deviation)
                return 0;

        return cc;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Linus Torvalds                298   99.00%        2      50.00%
Ivan Kokshaysky                 2    0.66%        1      25.00%
Tobias Klauser                  1    0.33%        1      25.00%
Total                         301  100.00%        4     100.00%

/*
 * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH 0xffff
#define TIMEOUT_COUNT   0x100000
static unsigned long __init
calibrate_cc_with_pit(void)
{
        int cc, count = 0;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Now let's take care of CTC channel 2
         *
         * Set the Gate high, program CTC channel 2 for mode 0,
         * (interrupt on terminal count mode), binary count,
         * load 5 * LATCH count, (LSB and MSB) to begin countdown.
         */
        outb(0xb0, 0x43);               /* binary, mode 0, LSB/MSB, Ch 2 */
        outb(CALIBRATE_LATCH & 0xff, 0x42);     /* LSB of count */
        outb(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */

        cc = rpcc();
        do {
                count++;
        } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
        cc = rpcc() - cc;

        /* Error: ECTCNEVERSET or ECPUTOOFAST.  */
        if (count <= 1 || count == TIMEOUT_COUNT)
                return 0;

        return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Linus Torvalds                 86   66.15%        2      18.18%
Linus Torvalds (pre-git)       26   20.00%        7      63.64%
Ivan Kokshaysky                16   12.31%        1       9.09%
Andrew Morton                   2    1.54%        1       9.09%
Total                         130  100.00%       11     100.00%
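
The return expression in calibrate_cc_with_pit() converts the measured cycle delta to Hz: the PIT counted down CALIBRATE_LATCH + 1 = 65536 ticks of its 1193182 Hz input clock (about 54.9 ms), so frequency = cycles * PIT_TICK_RATE / 65536. A worked host-side example with a hypothetical cycle count (assumes 64-bit unsigned long, as on Alpha, so the product cannot overflow):

#include <stdio.h>

#define PIT_TICK_RATE   1193182UL       /* 8254 input clock, in Hz */
#define CALIBRATE_LATCH 0xffff

int main(void)
{
        /* Hypothetical cycle delta measured while the PIT counted down
           CALIBRATE_LATCH + 1 = 65536 ticks, i.e. roughly 54.9 ms. */
        unsigned long cc = 27458432;

        /* Same conversion as calibrate_cc_with_pit()'s return value. */
        unsigned long hz = (cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);

        printf("estimated CPU frequency: %lu Hz\n", hz); /* ~500 MHz */
        return 0;
}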

/* The Linux interpretation of the CMOS clock register contents:
   When the Update-In-Progress (UIP) flag goes from 1 to 0, the
   RTC registers show the second which has precisely just started.
   Let's hope other operating systems interpret the RTC the same way.  */
static unsigned long __init
rpcc_after_update_in_progress(void)
{
        do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
        do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

        return rpcc();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)       28   68.29%        3      75.00%
Linus Torvalds                 13   31.71%        1      25.00%
Total                          41  100.00%        4     100.00%
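
Because each call returns at an update-in-progress edge, two consecutive calls sample the cycle counter almost exactly one RTC second apart; their 32-bit difference is therefore an estimate of the cycle frequency in Hz, which is how time_init() below uses the pair. A sketch with hypothetical values showing that the estimate survives a counter wrap between the two samples:

#include <stdio.h>

typedef unsigned int u32;

int main(void)
{
        /* Hypothetical 500 MHz part: its 32-bit RPCC wraps about every
           2^32 / 5e8 ~= 8.6 s, yet samples taken one second apart
           still subtract to the frequency. */
        unsigned long long freq = 500000000ULL;
        unsigned long long t1 = 4200000000ULL; /* absolute cycle count */
        unsigned long long t2 = t1 + freq;     /* one RTC second later */

        u32 cc1 = (u32)t1;      /* what rpcc() would return: low 32 bits */
        u32 cc2 = (u32)t2;      /* counter has wrapped by now */

        printf("cc2 - cc1 = %u Hz\n", cc2 - cc1); /* 500000000 */
        return 0;
}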


void __init
time_init(void)
{
        unsigned int cc1, cc2;
        unsigned long cycle_freq, tolerance;
        long diff;

        if (alpha_using_qemu) {
                clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
                init_qemu_clockevent();
                timer_irqaction.handler = qemu_timer_interrupt;
                init_rtc_irq();
                return;
        }

        /* Calibrate CPU clock -- attempt #1.  */
        if (!est_cycle_freq)
                est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

        cc1 = rpcc();

        /* Calibrate CPU clock -- attempt #2.  */
        if (!est_cycle_freq) {
                cc1 = rpcc_after_update_in_progress();
                cc2 = rpcc_after_update_in_progress();
                est_cycle_freq = validate_cc_value(cc2 - cc1);
                cc1 = cc2;
        }

        cycle_freq = hwrpb->cycle_freq;
        if (est_cycle_freq) {
                /* If the given value is within 250 PPM of what we calculated,
                   accept it.  Otherwise, use what we found.  */
                tolerance = cycle_freq / 4000;
                diff = cycle_freq - est_cycle_freq;
                if (diff < 0)
                        diff = -diff;
                if ((unsigned long)diff > tolerance) {
                        cycle_freq = est_cycle_freq;
                        printk("HWRPB cycle frequency bogus.  "
                               "Estimated %lu Hz\n", cycle_freq);
                } else {
                        est_cycle_freq = 0;
                }
        } else if (! validate_cc_value (cycle_freq)) {
                printk("HWRPB cycle frequency bogus, "
                       "and unable to estimate a proper value!\n");
        }

        /* See above for restrictions on using clocksource_rpcc.  */
#ifndef CONFIG_ALPHA_WTINT
        if (hwrpb->nr_processors == 1)
                clocksource_register_hz(&clocksource_rpcc, cycle_freq);
#endif

        /* Startup the timer source.  */
        alpha_mv.init_rtc();
        init_rtc_clockevent();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Linus Torvalds                 74   34.42%        2      15.38%
Linus Torvalds (pre-git)       72   33.49%        5      38.46%
Richard Henderson              62   28.84%        4      30.77%
Matt Mackall                    6    2.79%        1       7.69%
Andrew Morton                   1    0.47%        1       7.69%
Total                         215  100.00%       13     100.00%
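
The tolerance computation in time_init() encodes the 250 PPM bound mentioned in the file header: cycle_freq / 4000 is exactly 250 parts per million. A minimal host-side sketch of the accept/reject decision (hwrpb_freq_ok is a hypothetical helper mirroring the kernel logic):

#include <stdio.h>
#include <stdlib.h>

/* Accept the firmware (HWRPB) frequency if it is within 250 PPM
   of the measured estimate, as time_init() does. */
static int hwrpb_freq_ok(unsigned long cycle_freq, unsigned long est)
{
        unsigned long tolerance = cycle_freq / 4000; /* 1/4000 = 250 PPM */
        long diff = (long)(cycle_freq - est);

        return (unsigned long)labs(diff) <= tolerance;
}

int main(void)
{
        /* 500 MHz part: 250 PPM of slack is 125 kHz. */
        printf("%d\n", hwrpb_freq_ok(500000000UL, 500100000UL)); /* 1: off by 200 PPM */
        printf("%d\n", hwrpb_freq_ok(500000000UL, 500200000UL)); /* 0: off by 400 PPM */
        return 0;
}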

/* Initialize the clock_event_device for secondary cpus.  */
#ifdef CONFIG_SMP
void __init
init_clockevent(void)
{
        if (alpha_using_qemu)
                init_qemu_clockevent();
        else
                init_rtc_clockevent();
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson              18   94.74%        2      66.67%
Linus Torvalds (pre-git)        1    5.26%        1      33.33%
Total                          19  100.00%        3     100.00%

#endif

Overall Contributors

Person                     Tokens     Prop  Commits  CommitProp
Richard Henderson             564   36.22%        9      16.36%
Linus Torvalds                480   30.83%        4       7.27%
Linus Torvalds (pre-git)      360   23.12%       19      34.55%
Michael Cree                   48    3.08%        2       3.64%
Peter Zijlstra                 22    1.41%        3       5.45%
Ivan Kokshaysky                21    1.35%        2       3.64%
Viresh Kumar                   20    1.28%        1       1.82%
Andrew Morton                   6    0.39%        2       3.64%
Matt Mackall                    6    0.39%        1       1.82%
Thomas Gleixner                 6    0.39%        2       3.64%
Al Viro                         5    0.32%        1       1.82%
Stephen Hemminger               3    0.19%        1       1.82%
William Lee Irwin III           3    0.19%        1       1.82%
Arnaldo Carvalho de Melo        3    0.19%        1       1.82%
John Stultz                     3    0.19%        1       1.82%
Christoph Lameter               3    0.19%        1       1.82%
Tobias Klauser                  1    0.06%        1       1.82%
Greg Kroah-Hartman              1    0.06%        1       1.82%
Sam Ravnborg                    1    0.06%        1       1.82%
Marc Zyngier                    1    0.06%        1       1.82%
Total                        1557  100.00%       55     100.00%