Contributors: 28

Author                     Tokens  Token Proportion  Commits  Commit Proportion
Thomas Gleixner                72            23.84%       11             22.00%
Andi Kleen                     71            23.51%        7             14.00%
John Stultz                    27             8.94%        2              4.00%
Andres Salomon                 24             7.95%        1              2.00%
Ingo Molnar                    11             3.64%        2              4.00%
Jason A. Donenfeld             11             3.64%        1              2.00%
Christopher S. Hall            11             3.64%        1              2.00%
Linus Torvalds                 10             3.31%        1              2.00%
Mike Travis                     7             2.32%        1              2.00%
Alok N Kataria                  6             1.99%        2              4.00%
Linus Torvalds (pre-git)        6             1.99%        2              4.00%
Rajvi Jingar                    6             1.99%        1              2.00%
Pavel Tatashin                  5             1.66%        2              4.00%
Bin Gao                         4             1.32%        1              2.00%
Yi Wang                         4             1.32%        1              2.00%
Borislav Petkov                 3             0.99%        1              2.00%
Glauber de Oliveira Costa       3             0.99%        1              2.00%
Suresh B. Siddha                3             0.99%        2              4.00%
H. Peter Anvin                  3             0.99%        1              2.00%
Andrea Arcangeli                3             0.99%        1              2.00%
Dave Jones                      2             0.66%        1              2.00%
Dou Liyang                      2             0.66%        1              2.00%
Marcelo Tosatti                 2             0.66%        1              2.00%
Rusty Russell                   2             0.66%        1              2.00%
Greg Kroah-Hartman              1             0.33%        1              2.00%
Len Brown                       1             0.33%        1              2.00%
Andrew Morton                   1             0.33%        1              2.00%
Andrew Lutomirski               1             0.33%        1              2.00%
Total                         302           100.00%       50            100.00%


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/processor.h>
#include <asm/cpufeature.h>

/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

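/*
 * cpu_khz is the CPU core frequency in kHz as detected at boot; tsc_khz is
 * the TSC frequency in kHz.  The two can differ on CPUs where the TSC ticks
 * at the crystal clock rather than the core clock.
 */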
extern unsigned int cpu_khz;
extern unsigned int tsc_khz;

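/*
 * Disable user space RDTSC for the current task by setting CR4.TSD; this
 * backs prctl(PR_SET_TSC, PR_TSC_SIGSEGV).
 */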
extern void disable_TSC(void);

static inline cycles_t get_cycles(void)
{
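	/*
	 * If the kernel cannot assume a TSC (CONFIG_X86_TSC=n) and the CPU
	 * does not advertise the feature, return 0 instead of executing a
	 * faulting RDTSC.
	 */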
	if (!IS_ENABLED(CONFIG_X86_TSC) &&
	    !cpu_feature_enabled(X86_FEATURE_TSC))
		return 0;
	return rdtsc();
}
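/* Marker: lets other code detect with #ifdef that get_cycles() is provided. */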
#define get_cycles get_cycles
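
/*
 * Usage sketch (not part of this header; assumes a calibrated, non-zero
 * tsc_khz): converting a measured cycle delta to microseconds with
 * div64_u64() from <linux/math64.h>.  tsc_khz is in cycles per
 * millisecond, so delta * 1000 / tsc_khz yields microseconds.
 *
 *	cycles_t t0 = get_cycles();
 *	// ... code under measurement ...
 *	cycles_t t1 = get_cycles();
 *	u64 usecs = div64_u64((u64)(t1 - t0) * 1000, tsc_khz);
 */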

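/*
 * Conversions between the Always Running Timer (ART) and the TSC, used for
 * cross-timestamping (e.g. PTP hardware clocks).  The TSC/ART ratio comes
 * from CPUID leaf 0x15.
 */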
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);

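/*
 * Early and final TSC initialization, plus the native calibration routines;
 * the calibration functions return frequencies in kHz.
 */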
extern void tsc_early_init(void);
extern void tsc_init(void);
extern unsigned long calibrate_delay_is_known(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);

extern int tsc_clocksource_reliable;	/* tsc=reliable or X86_FEATURE_TSC_RELIABLE */
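
/*
 * Set when the platform can reset the TSC outside of kernel control, e.g.
 * across suspend on some systems; see mark_tsc_async_resets().
 */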
#ifdef CONFIG_X86_TSC
extern bool tsc_async_resets;
#else
# define tsc_async_resets	false
#endif

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
#ifdef CONFIG_X86_TSC
extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
#else
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
static inline void tsc_verify_tsc_adjust(bool resume) { }
static inline void check_tsc_sync_source(int cpu) { }
static inline void check_tsc_sync_target(void) { }
#endif

extern int notsc_setup(char *);			/* "notsc" boot parameter */
extern void tsc_save_sched_clock_state(void);	/* suspend path */
extern void tsc_restore_sched_clock_state(void);	/* resume path */

unsigned long cpu_khz_from_msr(void);	/* MSR based frequency detection (Intel SoCs) */

#endif /* _ASM_X86_TSC_H */