Contributors: 26
Author Tokens Token Proportion Commits Commit Proportion
Andi Kleen 67 25.67% 7 14.58%
Thomas Gleixner 60 22.99% 11 22.92%
John Stultz 27 10.34% 2 4.17%
Andres Salomon 24 9.20% 1 2.08%
Jason A. Donenfeld 11 4.21% 1 2.08%
Linus Torvalds 10 3.83% 1 2.08%
Mike Travis 7 2.68% 1 2.08%
Ingo Molnar 7 2.68% 2 4.17%
Linus Torvalds (pre-git) 6 2.30% 2 4.17%
Alok N Kataria 6 2.30% 2 4.17%
Pavel Tatashin 5 1.92% 2 4.17%
Bin Gao 4 1.53% 1 2.08%
H. Peter Anvin 3 1.15% 1 2.08%
Borislav Petkov 3 1.15% 1 2.08%
Glauber de Oliveira Costa 3 1.15% 1 2.08%
Suresh B. Siddha 3 1.15% 2 4.17%
Andrea Arcangeli 3 1.15% 1 2.08%
Marcelo Tosatti 2 0.77% 1 2.08%
Rusty Russell 2 0.77% 1 2.08%
Dave Jones 2 0.77% 1 2.08%
Greg Kroah-Hartman 1 0.38% 1 2.08%
Len Brown 1 0.38% 1 2.08%
Andrew Morton 1 0.38% 1 2.08%
Dou Liyang 1 0.38% 1 2.08%
Andrew Lutomirski 1 0.38% 1 2.08%
Christopher S. Hall 1 0.38% 1 2.08%
Total 261 48


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>

/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

/* CPU and TSC frequencies in kHz, established at calibration time */
extern unsigned int cpu_khz;
extern unsigned int tsc_khz;

/* Disable userspace access to the TSC (definition lives in tsc.c) */
extern void disable_TSC(void);

static inline cycles_t get_cycles(void)
{
	/*
	 * Read the TSC only when it is either unconditionally available
	 * (CONFIG_X86_TSC=y) or reported by the CPU feature bits;
	 * otherwise return 0.
	 */
	if (IS_ENABLED(CONFIG_X86_TSC) || cpu_feature_enabled(X86_FEATURE_TSC))
		return rdtsc();

	return 0;
}
#define get_cycles get_cycles

/* Early and full TSC initialization entry points */
extern void tsc_early_init(void);
extern void tsc_init(void);
/* Mark the TSC unusable as a clocksource; 'reason' is logged */
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
/* Note that the TSC may reset asynchronously (e.g. across suspend) */
extern void mark_tsc_async_resets(char *reason);
/* Calibration helpers returning frequency in kHz */
extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
/* Convert a raw TSC value to sched_clock() nanoseconds */
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);

extern int tsc_clocksource_reliable;
#ifdef CONFIG_X86_TSC
extern bool tsc_async_resets;
#else
/* Without CONFIG_X86_TSC async resets can never be flagged */
# define tsc_async_resets	false
#endif

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
#ifdef CONFIG_X86_TSC
extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_target(void);
#else
/* !CONFIG_X86_TSC: no-op stubs; "not adjusted" is the only answer */
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
static inline void tsc_verify_tsc_adjust(bool resume) { }
static inline void check_tsc_sync_target(void) { }
#endif

extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);

unsigned long cpu_khz_from_msr(void);

#endif /* _ASM_X86_TSC_H */