Contributors: 33
Author Tokens Token Proportion Commits Commit Proportion
Dave Jones 85 19.50% 2 3.85%
Linus Torvalds (pre-git) 77 17.66% 1 1.92%
Borislav Petkov 47 10.78% 5 9.62%
Pawan Gupta 44 10.09% 4 7.69%
Jan Beulich 26 5.96% 2 3.85%
Thomas Gleixner 25 5.73% 7 13.46%
Ahmed S. Darwish 18 4.13% 2 3.85%
Thomas Petazzoni 17 3.90% 1 1.92%
Yinghai Lu 17 3.90% 3 5.77%
Linus Torvalds 9 2.06% 1 1.92%
Andi Kleen 8 1.83% 2 3.85%
David Woodhouse 8 1.83% 1 1.92%
Alex Shi 6 1.38% 1 1.92%
Konrad Rzeszutek Wilk 5 1.15% 1 1.92%
Robert Richter 4 0.92% 1 1.92%
Rafael J. Wysocki 4 0.92% 1 1.92%
Jane Malalane 3 0.69% 1 1.92%
Daniel Sneddon 3 0.69% 1 1.92%
Venkatesh Pallipadi 3 0.69% 1 1.92%
H. Peter Anvin 3 0.69% 1 1.92%
Peter Zijlstra 3 0.69% 1 1.92%
Pu Wen 3 0.69% 1 1.92%
Mark Gross 3 0.69% 1 1.92%
M. Vefa Bicakci 3 0.69% 1 1.92%
Sudeep Holla 2 0.46% 1 1.92%
Suravee Suthikulpanit 2 0.46% 1 1.92%
Andrew Morton 2 0.46% 1 1.92%
Greg Kroah-Hartman 1 0.23% 1 1.92%
David Wang 1 0.23% 1 1.92%
Joe Perches 1 0.23% 1 1.92%
Viresh Kumar 1 0.23% 1 1.92%
Andreas Herrmann 1 0.23% 1 1.92%
Jaswinder Singh Rajput 1 0.23% 1 1.92%
Total 436 52


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_CPU_H
#define ARCH_X86_CPU_H

#include <asm/cpu.h>
#include <asm/topology.h>

#include "topology.h"

/*
 * Per-vendor CPU driver descriptor — attempt to consolidate cpu attributes.
 * Instances are placed into the .x86_cpu_dev.init section via
 * cpu_dev_register() and iterated between __x86_cpu_dev_start and
 * __x86_cpu_dev_end.
 */
struct cpu_dev {
	const char	*c_vendor;

	/* some have two possibilities for cpuid string */
	const char	*c_ident[2];

	/* Optional per-vendor hooks; presumably called at successive CPU setup phases — confirm against callers. */
	void		(*c_early_init)(struct cpuinfo_x86 *);
	void		(*c_bsp_init)(struct cpuinfo_x86 *);
	void		(*c_init)(struct cpuinfo_x86 *);
	void		(*c_identify)(struct cpuinfo_x86 *);
	void		(*c_detect_tlb)(struct cpuinfo_x86 *);
	int		c_x86_vendor;
#ifdef CONFIG_X86_32
	/* Optional vendor specific routine to obtain the cache size. */
	unsigned int	(*legacy_cache_size)(struct cpuinfo_x86 *,
					     unsigned int);

	/* Family/stepping-based lookup table for model names. */
	struct legacy_cpu_model_info {
		int		family;
		const char	*model_names[16];
	}		legacy_models[5];
#endif
};

/*
 * Emit a pointer to @cpu_devX into the .x86_cpu_dev.init section so the
 * generic CPU code can walk all registered vendor drivers.  Note the
 * expansion already ends in ';', so callers invoke this without one.
 */
#define cpu_dev_register(cpu_devX) \
	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
	__section(".x86_cpu_dev.init") = \
	&cpu_devX;

/*
 * Bounds of the .x86_cpu_dev.init pointer array — presumably supplied by
 * the linker script; verify against the arch vmlinux.lds.
 */
extern const struct cpu_dev *const __x86_cpu_dev_start[],
			    *const __x86_cpu_dev_end[];

#ifdef CONFIG_CPU_SUP_INTEL
/* Intel-only setup hooks (TSX configuration and CPUID leaf unlocking). */
extern void __init tsx_init(void);
void tsx_ap_init(void);
void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
#else
/* No-op stubs when Intel CPU support is compiled out. */
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
#endif /* CONFIG_CPU_SUP_INTEL */

extern void init_spectral_chicken(struct cpuinfo_x86 *c);

/* Feature/capability detection helpers shared across vendor drivers. */
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
/* Per-vendor cache-topology initializers. */
extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);

extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);

/* Last-level-cache id setup; AMD takes the die id, Hygon derives its own. */
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c);

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
struct amd_northbridge *amd_init_l3_cache(int index);
#else
/* Stub: without AMD northbridge + sysfs support there is no L3 object. */
static inline struct amd_northbridge *amd_init_l3_cache(int index)
{
	return NULL;
}
#endif

unsigned int aperfmperf_get_khz(int cpu);
void cpu_select_mitigations(void);

/* Speculation-mitigation MSR programming, also run on APs during bringup. */
extern void x86_spec_ctrl_setup_ap(void);
extern void update_srbds_msr(void);
extern void update_gds_msr(void);

/* Currently selected spectre_v2 mitigation mode (set by mitigation code). */
extern enum spectre_v2_mitigation spectre_v2_enabled;

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return mode == SPECTRE_V2_EIBRS ||
	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	       mode == SPECTRE_V2_EIBRS_LFENCE;
}

#endif /* ARCH_X86_CPU_H */