Contributors: 27
Author Tokens Token Proportion Commits Commit Proportion
Andi Kleen 67 24.28% 7 14.29%
Thomas Gleixner 60 21.74% 11 22.45%
John Stultz 27 9.78% 2 4.08%
Andres Salomon 24 8.70% 1 2.04%
Christopher S. Hall 11 3.99% 1 2.04%
Jason A. Donenfeld 11 3.99% 1 2.04%
Linus Torvalds 10 3.62% 1 2.04%
Mike Travis 7 2.54% 1 2.04%
Alok N Kataria 6 2.17% 2 4.08%
Linus Torvalds (pre-git) 6 2.17% 2 4.08%
Rajvi Jingar 6 2.17% 1 2.04%
Ingo Molnar 5 1.81% 2 4.08%
Pavel Tatashin 5 1.81% 2 4.08%
Bin Gao 4 1.45% 1 2.04%
Suresh B. Siddha 3 1.09% 2 4.08%
Glauber de Oliveira Costa 3 1.09% 1 2.04%
H. Peter Anvin 3 1.09% 1 2.04%
Andrea Arcangeli 3 1.09% 1 2.04%
Borislav Petkov 3 1.09% 1 2.04%
Dave Jones 2 0.72% 1 2.04%
Rusty Russell 2 0.72% 1 2.04%
Marcelo Tosatti 2 0.72% 1 2.04%
Dou Liyang 2 0.72% 1 2.04%
Len Brown 1 0.36% 1 2.04%
Andrew Morton 1 0.36% 1 2.04%
Andrew Lutomirski 1 0.36% 1 2.04%
Greg Kroah-Hartman 1 0.36% 1 2.04%
Total 276 49


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/processor.h>
#include <asm/cpufeature.h>

/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

/*
 * CPU and TSC frequencies in kHz, filled in during calibration
 * (see native_calibrate_cpu_early()/native_calibrate_tsc() below).
 */
extern unsigned int cpu_khz;
extern unsigned int tsc_khz;

/* Disable userspace RDTSC for the current task — presumably via CR4.TSD; confirm in the implementation. */
extern void disable_TSC(void);

/*
 * Read the cycle counter. When CONFIG_X86_TSC is set the TSC is
 * guaranteed present, so RDTSC can be issued unconditionally;
 * otherwise the CPU feature bit decides. CPUs without a TSC get 0.
 */
static inline cycles_t get_cycles(void)
{
	if (IS_ENABLED(CONFIG_X86_TSC) ||
	    cpu_feature_enabled(X86_FEATURE_TSC))
		return rdtsc();

	return 0;
}
#define get_cycles get_cycles

/*
 * Convert an ART (Always Running Timer) reading into a TSC based
 * struct system_counterval_t; the _ns variant takes the ART value
 * expressed in nanoseconds.
 */
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);

/* TSC setup, stability tracking and calibration entry points. */
extern void tsc_early_init(void);
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);

extern int tsc_clocksource_reliable;
#ifdef CONFIG_X86_TSC
extern bool tsc_async_resets;
#else
/* Without TSC support, asynchronous TSC resets can never occur. */
# define tsc_async_resets	false
#endif

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
#ifdef CONFIG_X86_TSC
/* Store/verify TSC_ADJUST state and run target-CPU sync checks. */
extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_target(void);
#else
/* Stubs for !CONFIG_X86_TSC: no TSC_ADJUST/sync handling is required. */
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	return false;
}
static inline void tsc_verify_tsc_adjust(bool resume)
{
}
static inline void check_tsc_sync_target(void)
{
}
#endif

extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);

unsigned long cpu_khz_from_msr(void);

#endif /* _ASM_X86_TSC_H */