Contributors: 20
Author Tokens Token Proportion Commits Commit Proportion
Thomas Gleixner 110 36.54% 12 34.29%
Andres Salomon 75 24.92% 1 2.86%
Suresh B. Siddha 16 5.32% 2 5.71%
Mike Travis 13 4.32% 1 2.86%
Glauber de Oliveira Costa 10 3.32% 2 5.71%
Andi Kleen 10 3.32% 1 2.86%
Rajvi Jingar 9 2.99% 1 2.86%
Pavel Tatashin 9 2.99% 2 5.71%
Yi Wang 8 2.66% 1 2.86%
Christopher S. Hall 8 2.66% 1 2.86%
Rusty Russell 6 1.99% 1 2.86%
Dou Liyang 6 1.99% 1 2.86%
John Stultz 5 1.66% 2 5.71%
Borislav Petkov 4 1.33% 1 2.86%
Bin Gao 4 1.33% 1 2.86%
H. Peter Anvin 3 1.00% 1 2.86%
Marcelo Tosatti 2 0.66% 1 2.86%
Greg Kroah-Hartman 1 0.33% 1 2.86%
Len Brown 1 0.33% 1 2.86%
Andrew Lutomirski 1 0.33% 1 2.86%
Total 301 35


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/processor.h>

#define NS_SCALE	10 /* 2^10, carefully chosen */
#define US_SCALE	32 /* 2^32, arbitrarily chosen */

/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

extern unsigned int cpu_khz;	/* CPU frequency — presumably kHz, per the name; defined out of view */
extern unsigned int tsc_khz;	/* TSC frequency — presumably kHz, per the name; defined out of view */

extern void disable_TSC(void);

/*
 * Read the cycle counter (TSC).
 *
 * When the architecture does not guarantee a TSC (!CONFIG_X86_TSC),
 * return 0 on CPUs that lack the feature instead of faulting on RDTSC.
 */
static inline cycles_t get_cycles(void)
{
#ifdef CONFIG_X86_TSC
	return rdtsc();
#else
	return boot_cpu_has(X86_FEATURE_TSC) ? rdtsc() : 0;
#endif
}

/*
 * ART <-> TSC conversion helpers. NOTE(review): "art" presumably refers to
 * the platform's Always Running Timer — confirm against the definitions in
 * tsc.c, which are not visible here.
 */
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);

/* Early-boot and full TSC initialization entry points (defined out of view). */
extern void tsc_early_init(void);
extern void tsc_init(void);
extern unsigned long calibrate_delay_is_known(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);

extern int tsc_clocksource_reliable;
/* Without CONFIG_X86_TSC the async-resets flag compiles away to constant false. */
#ifdef CONFIG_X86_TSC
extern bool tsc_async_resets;
#else
# define tsc_async_resets	false
#endif

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
#ifdef CONFIG_X86_TSC
extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
#else
/* No TSC support configured: the sync checks become no-op stubs. */
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
static inline void tsc_verify_tsc_adjust(bool resume) { }
static inline void check_tsc_sync_source(int cpu) { }
static inline void check_tsc_sync_target(void) { }
#endif

extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);

unsigned long cpu_khz_from_msr(void);

#endif /* _ASM_X86_TSC_H */