Contributors: 26
Author Tokens Token Proportion Commits Commit Proportion
Peter Zijlstra 62 14.55% 4 6.90%
Borislav Petkov 61 14.32% 6 10.34%
Fenghua Yu 60 14.08% 5 8.62%
Andrew Morton 42 9.86% 5 8.62%
Jithu Joseph 28 6.57% 4 6.90%
Linus Torvalds (pre-git) 23 5.40% 5 8.62%
Andi Kleen 22 5.16% 4 6.90%
Thomas Gleixner 22 5.16% 4 6.90%
Ricardo Neri 14 3.29% 1 1.72%
Len Brown 13 3.05% 1 1.72%
Sean Christopherson 13 3.05% 2 3.45%
Jaswinder Singh Rajput 12 2.82% 1 1.72%
Yinghai Lu 12 2.82% 2 3.45%
Vitaly Kuznetsov 7 1.64% 1 1.72%
Ashok Raj 7 1.64% 1 1.72%
Hiroshi Shimamoto 4 0.94% 1 1.72%
Matthew Dobson 3 0.70% 1 1.72%
Yanmin Zhang 3 0.70% 1 1.72%
Daniel Sneddon 3 0.70% 1 1.72%
H. Peter Anvin 3 0.70% 1 1.72%
Glauber de Oliveira Costa 3 0.70% 2 3.45%
Dave Jones 3 0.70% 1 1.72%
Ingo Molnar 3 0.70% 1 1.72%
Greg Kroah-Hartman 1 0.23% 1 1.72%
Mike Travis 1 0.23% 1 1.72%
Chuck Ebbert 1 0.23% 1 1.72%
Total 426 58


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPU_H
#define _ASM_X86_CPU_H

#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
#include <asm/ibt.h>

#ifdef CONFIG_SMP

/* Populate the possible-CPU map early during SMP boot. */
extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

/*
 * UP build: there is only the boot CPU, so the possible map needs no
 * prefill and the per-CPU identity lookups collapse to constants.
 */
static inline void prefill_possible_map(void) {}

#define cpu_physical_id(cpu)			boot_cpu_physical_apicid
#define cpu_acpi_id(cpu)			0
#define safe_smp_processor_id()			0

#endif /* CONFIG_SMP */

/* x86 wrapper embedding the generic CPU device (struct cpu from <linux/cpu.h>). */
struct x86_cpu {
	struct cpu cpu;
};

#ifdef CONFIG_HOTPLUG_CPU
/* Register/unregister a CPU device with the driver core for hotplug. */
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
/* Entry point used when bringing CPU0 back online — see CONFIG_DEBUG_HOTPLUG_CPU0. */
extern void start_cpu0(void);
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
extern int _debug_hotplug_cpu(int cpu, int action);
#endif
#endif

extern void ap_init_aperfmperf(void);

int mwait_usable(const struct cpuinfo_x86 *);

/* Decode the family/model/stepping fields from a CPUID signature value. */
unsigned int x86_family(unsigned int sig);
unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig);
#ifdef CONFIG_CPU_SUP_INTEL
/* Split-lock / bus-lock detection setup and fault handlers (Intel only). */
extern void __init sld_setup(struct cpuinfo_x86 *c);
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
extern bool handle_guest_split_lock(unsigned long ip);
extern void handle_bus_lock(struct pt_regs *regs);
u8 get_this_hybrid_cpu_type(void);
#else
/*
 * Without Intel CPU support the split-lock machinery does not exist:
 * the handlers report "not handled" (false) and the hybrid CPU type
 * reads as 0 so callers take their generic paths.
 */
static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	return false;
}

static inline bool handle_guest_split_lock(unsigned long ip)
{
	return false;
}

static inline void handle_bus_lock(struct pt_regs *regs) {}

static inline u8 get_this_hybrid_cpu_type(void)
{
	return 0;
}
#endif
#ifdef CONFIG_IA32_FEAT_CTL
/* Configure the IA32_FEAT_CTL MSR during CPU init; no-op when unsupported. */
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
#endif

/* Disable Control-flow Enforcement Technology; __noendbr comes from <asm/ibt.h>. */
extern __noendbr void cet_disable(void);

/* Opaque here; defined by the microcode loader. */
struct ucode_cpu_info;

int intel_cpu_collect_info(struct ucode_cpu_info *uci);

static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1,
					      unsigned int s2, unsigned int p2)
{
	/*
	 * Two microcode signatures are considered a match when the CPUID
	 * signatures are identical and the processor flags are compatible:
	 * either neither side carries any flags, or the flag masks share
	 * at least one bit.
	 */
	if (s1 != s2)
		return false;

	if (p1 == 0 && p2 == 0)
		return true;

	return (p1 & p2) != 0;
}

extern u64 x86_read_arch_cap_msr(void);
/* Microcode loader helpers: match a blob against (csig, cpf) and validate headers. */
int intel_find_matching_signature(void *mc, unsigned int csig, int cpf);
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);

#endif /* _ASM_X86_CPU_H */