/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPU_H
#define _ASM_X86_CPU_H

#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
#include <asm/ibt.h>

#ifdef CONFIG_SMP

extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

static inline void prefill_possible_map(void) {}

#define cpu_physical_id(cpu)			boot_cpu_physical_apicid
#define cpu_acpi_id(cpu)			0
#define safe_smp_processor_id()			0

#endif /* CONFIG_SMP */

#ifdef CONFIG_HOTPLUG_CPU
extern void soft_restart_cpu(void);
#endif

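/* Initialize this CPU's APERF/MPERF reference counters as it comes online. */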
extern void ap_init_aperfmperf(void);

int mwait_usable(const struct cpuinfo_x86 *);

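/*
 * Decode the family, model and stepping fields from a CPUID signature
 * (CPUID leaf 0x1, EAX). As an illustrative example, a signature of
 * 0x000806ec decodes to family 0x6, model 0x8e, stepping 0xc.
 */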
unsigned int x86_family(unsigned int sig);
unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig);
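
/*
 * Split lock / bus lock detection and the hybrid core type query are
 * Intel-specific; without CONFIG_CPU_SUP_INTEL they fall back to the
 * no-op stubs below.
 */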
#ifdef CONFIG_CPU_SUP_INTEL
extern void __init sld_setup(struct cpuinfo_x86 *c);
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
extern bool handle_guest_split_lock(unsigned long ip);
extern void handle_bus_lock(struct pt_regs *regs);
u8 get_this_hybrid_cpu_type(void);
#else
static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	return false;
}

static inline bool handle_guest_split_lock(unsigned long ip)
{
	return false;
}

static inline void handle_bus_lock(struct pt_regs *regs) {}

static inline u8 get_this_hybrid_cpu_type(void)
{
	return 0;
}
#endif
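
/* Set up and lock the IA32_FEAT_CTL MSR, which gates VMX and SGX. */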
#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
#endif

extern __noendbr void cet_disable(void);

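/* Interfaces shared with the Intel microcode loader. */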
struct cpu_signature;

void intel_collect_cpu_info(struct cpu_signature *sig);

extern u64 x86_read_arch_cap_msr(void);
bool intel_find_matching_signature(void *mc, struct cpu_signature *sig);
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);

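/* CPUs that still need to be stopped on the shutdown/reboot path. */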
extern struct cpumask cpus_stop_mask;

#endif /* _ASM_X86_CPU_H */