Contributors: 29
Author                    Tokens  Token Proportion  Commits  Commit Proportion
Linus Torvalds (pre-git)      96            21.10%        1              2.13%
Dave Jones                    85            18.68%        2              4.26%
Pawan Gupta                   57            12.53%        4              8.51%
Borislav Petkov               35             7.69%        4              8.51%
Alex Shi                      29             6.37%        1              2.13%
Jan Beulich                   26             5.71%        2              4.26%
Thomas Petazzoni              18             3.96%        1              2.13%
Yinghai Lu                    13             2.86%        2              4.26%
Linus Torvalds                13             2.86%        1              2.13%
Thomas Gleixner               13             2.86%        6             12.77%
Andi Kleen                    12             2.64%        3              6.38%
David Woodhouse                8             1.76%        1              2.13%
H. Peter Anvin                 6             1.32%        1              2.13%
M. Vefa Bicakci                5             1.10%        1              2.13%
David Wang                     4             0.88%        2              4.26%
Rafael J. Wysocki              4             0.88%        1              2.13%
Daniel Sneddon                 3             0.66%        1              2.13%
Venkatesh Pallipadi            3             0.66%        1              2.13%
Jane Malalane                  3             0.66%        1              2.13%
Peter Zijlstra                 3             0.66%        1              2.13%
Mark Gross                     3             0.66%        1              2.13%
Konrad Rzeszutek Wilk          3             0.66%        1              2.13%
Suravee Suthikulpanit          3             0.66%        2              4.26%
Pu Wen                         3             0.66%        1              2.13%
Suresh B. Siddha               2             0.44%        1              2.13%
Robert Richter                 2             0.44%        1              2.13%
Viresh Kumar                   1             0.22%        1              2.13%
Joe Perches                    1             0.22%        1              2.13%
Greg Kroah-Hartman             1             0.22%        1              2.13%
Total                        455                         47


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_CPU_H
#define ARCH_X86_CPU_H

/* Consolidated per-vendor CPU attributes and setup hooks. */
struct cpu_dev {
	const char	*c_vendor;

	/* Some vendors have two possible CPUID vendor strings. */
	const char	*c_ident[2];

	void		(*c_early_init)(struct cpuinfo_x86 *);	/* early boot setup */
	void		(*c_bsp_init)(struct cpuinfo_x86 *);	/* boot CPU only */
	void		(*c_init)(struct cpuinfo_x86 *);	/* main per-CPU init */
	void		(*c_identify)(struct cpuinfo_x86 *);	/* extra identification */
	void		(*c_detect_tlb)(struct cpuinfo_x86 *);	/* fill in TLB info */
	int		c_x86_vendor;				/* X86_VENDOR_* value */
#ifdef CONFIG_X86_32
	/* Optional vendor-specific routine to obtain the cache size. */
	unsigned int	(*legacy_cache_size)(struct cpuinfo_x86 *,
					     unsigned int);

	/* Family/model-based lookup table for model names. */
	struct legacy_cpu_model_info {
		int		family;
		const char	*model_names[16];
	}		legacy_models[5];
#endif
};
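
/*
 * Illustrative sketch, not part of this header: each vendor file
 * (intel.c, amd.c, ...) provides one static instance of struct cpu_dev,
 * filling in only the hooks it needs.  The "dummy" names below are
 * hypothetical:
 *
 *	static const struct cpu_dev dummy_cpu_dev = {
 *		.c_vendor	= "Dummy",
 *		.c_ident	= { "GenuineDummy" },
 *		.c_early_init	= dummy_early_init,
 *		.c_init		= dummy_init,
 *		.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 *	};
 *
 * The instance is hooked into the boot code with the cpu_dev_register()
 * macro below.
 */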

struct _tlb_table {
	unsigned char descriptor;	/* descriptor byte from CPUID leaf 0x02 */
	char tlb_type;			/* which TLB the descriptor describes */
	unsigned int entries;		/* number of TLB entries */
	/* unsigned int ways; */
	char info[128];			/* human-readable description */
};
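
/*
 * Illustrative sketch: vendor code builds arrays of these entries, keyed
 * by the descriptor bytes that CPUID leaf 0x02 returns.  The first entry
 * below is modeled on the Intel descriptor tables and is shown only as an
 * example; the TLB_* type constants live in the vendor .c files:
 *
 *	static const struct _tlb_table example_tlb_table[] = {
 *		{ 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
 *		{ 0x00, 0, 0, "" }
 *	};
 *
 * A zero descriptor terminates the table.
 */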

#define cpu_dev_register(cpu_devX) \
	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
	__section(".x86_cpu_dev.init") = \
	&cpu_devX;
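
/*
 * Usage sketch for the hypothetical vendor instance above:
 *
 *	cpu_dev_register(dummy_cpu_dev);
 *
 * This drops a pointer to the instance into the .x86_cpu_dev.init linker
 * section, bounded by __x86_cpu_dev_start/__x86_cpu_dev_end below, so the
 * common boot code finds every built-in vendor driver without any explicit
 * registration call at runtime.
 */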

extern const struct cpu_dev *const __x86_cpu_dev_start[],
			    *const __x86_cpu_dev_end[];
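
/*
 * Simplified sketch of how the common boot code walks the registered
 * vendor drivers (modeled on the early CPU setup loop in common.c):
 *
 *	const struct cpu_dev *const *cdev;
 *
 *	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
 *		const struct cpu_dev *cpudev = *cdev;
 *
 *		// match cpudev->c_ident[] against the CPUID vendor string,
 *		// then invoke the matching driver's hooks
 *	}
 */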

#ifdef CONFIG_CPU_SUP_INTEL
enum tsx_ctrl_states {
	TSX_CTRL_ENABLE,
	TSX_CTRL_DISABLE,
	TSX_CTRL_RTM_ALWAYS_ABORT,
	TSX_CTRL_NOT_SUPPORTED,
};

extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;

extern void __init tsx_init(void);
void tsx_ap_init(void);
#else
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */
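
/*
 * Illustrative sketch of acting on tsx_ctrl_state, a simplified rendering
 * of what the AP bringup path does (the real logic lives in tsx.c):
 *
 *	switch (tsx_ctrl_state) {
 *	case TSX_CTRL_ENABLE:
 *		tsx_enable();	// set the enable bits in MSR_IA32_TSX_CTRL
 *		break;
 *	case TSX_CTRL_DISABLE:
 *		tsx_disable();	// clear them
 *		break;
 *	default:
 *		break;		// other states need their own handling
 *	}
 */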

extern void init_spectral_chicken(struct cpuinfo_x86 *c);

extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
extern int detect_extended_topology(struct cpuinfo_x86 *c);
extern int detect_ht_early(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c);

unsigned int aperfmperf_get_khz(int cpu);
void cpu_select_mitigations(void);

extern void x86_spec_ctrl_setup_ap(void);
extern void update_srbds_msr(void);
extern void update_gds_msr(void);

extern enum spectre_v2_mitigation spectre_v2_enabled;

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return mode == SPECTRE_V2_EIBRS ||
	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	       mode == SPECTRE_V2_EIBRS_LFENCE;
}
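
/*
 * Usage sketch: mitigation code can gate eIBRS-specific decisions on the
 * currently selected mode, e.g.:
 *
 *	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
 *		return;		// skip work only non-eIBRS modes need
 */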
#endif /* ARCH_X86_CPU_H */