Contributors: 26
Author                    Tokens  Token Proportion  Commits  Commit Proportion
Peter Zijlstra                62            17.82%        4              7.27%
Thomas Gleixner               38            10.92%        8             14.55%
Andrew Morton                 34             9.77%        5              9.09%
Jithu Joseph                  29             8.33%        4              7.27%
Borislav Petkov               26             7.47%        3              5.45%
Linus Torvalds (pre-git)      23             6.61%        5              9.09%
Andi Kleen                    21             6.03%        3              5.45%
Fenghua Yu                    17             4.89%        2              3.64%
Ricardo Neri                  14             4.02%        1              1.82%
Sean Christopherson           13             3.74%        2              3.64%
Yinghai Lu                    12             3.45%        2              3.64%
Jaswinder Singh Rajput        12             3.45%        1              1.82%
Vitaly Kuznetsov               7             2.01%        1              1.82%
Ashok Raj                      7             2.01%        1              1.82%
Len Brown                      5             1.44%        1              1.82%
Hiroshi Shimamoto              4             1.15%        1              1.82%
Daniel Sneddon                 3             0.86%        1              1.82%
Ingo Molnar                    3             0.86%        1              1.82%
Dave Jones                     3             0.86%        1              1.82%
Glauber de Oliveira Costa      3             0.86%        2              3.64%
Yanmin Zhang                   3             0.86%        1              1.82%
H. Peter Anvin                 3             0.86%        1              1.82%
Matthew Dobson                 3             0.86%        1              1.82%
Mike Travis                    1             0.29%        1              1.82%
Chuck Ebbert                   1             0.29%        1              1.82%
Greg Kroah-Hartman             1             0.29%        1              1.82%
Total                        348           100.00%       55            100.00%


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPU_H
#define _ASM_X86_CPU_H

#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
#include <asm/ibt.h>

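/*
 * On SMP kernels the possible-CPU map is filled in during boot; on UP
 * (!CONFIG_SMP) builds the helpers below collapse to the boot CPU.
 */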
#ifdef CONFIG_SMP

extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

static inline void prefill_possible_map(void) {}

#define cpu_physical_id(cpu)			boot_cpu_physical_apicid
#define cpu_acpi_id(cpu)			0
#define safe_smp_processor_id()			0

#endif /* CONFIG_SMP */

struct x86_cpu {
	struct cpu cpu;
};

#ifdef CONFIG_HOTPLUG_CPU
extern void soft_restart_cpu(void);
#endif

extern void ap_init_aperfmperf(void);

int mwait_usable(const struct cpuinfo_x86 *);

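/*
 * Decode the family/model/stepping fields from a CPUID leaf 0x1 EAX
 * signature, including the extended family/model bits, e.g.
 * sig 0x000806ec -> family 6, model 0x8e, stepping 12.
 */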
unsigned int x86_family(unsigned int sig);
unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig);
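
/*
 * Split Lock Detect, bus lock handling and the hybrid CPU type query are
 * implemented for CONFIG_CPU_SUP_INTEL only; otherwise the inline stubs
 * below reduce them to no-ops.
 */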
#ifdef CONFIG_CPU_SUP_INTEL
extern void __init sld_setup(struct cpuinfo_x86 *c);
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
extern bool handle_guest_split_lock(unsigned long ip);
extern void handle_bus_lock(struct pt_regs *regs);
u8 get_this_hybrid_cpu_type(void);
#else
static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	return false;
}

static inline bool handle_guest_split_lock(unsigned long ip)
{
	return false;
}

static inline void handle_bus_lock(struct pt_regs *regs) {}

static inline u8 get_this_hybrid_cpu_type(void)
{
	return 0;
}
#endif
#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
#endif

extern __noendbr void cet_disable(void);

struct cpu_signature;

void intel_collect_cpu_info(struct cpu_signature *sig);

extern u64 x86_read_arch_cap_msr(void);
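
/*
 * Used by the microcode loader: sanity-check a microcode blob's header and
 * match it against the CPU's signature before it is applied.
 */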
bool intel_find_matching_signature(void *mc, struct cpu_signature *sig);
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);

extern struct cpumask cpus_stop_mask;

#endif /* _ASM_X86_CPU_H */