Contributors: 43
Author Tokens Token Proportion Commits Commit Proportion
Jeremy Fitzhardinge 180 20.48% 5 5.00%
Glauber de Oliveira Costa 116 13.20% 16 16.00%
Alex Nixon 65 7.39% 3 3.00%
Mike Travis 61 6.94% 9 9.00%
Thomas Gleixner 53 6.03% 7 7.00%
Borislav Petkov 44 5.01% 2 2.00%
Linus Torvalds (pre-git) 43 4.89% 6 6.00%
Jens Axboe 30 3.41% 1 1.00%
Yi Wang 30 3.41% 1 1.00%
Yinghai Lu 26 2.96% 1 1.00%
Vitaly Kuznetsov 23 2.62% 4 4.00%
Andi Kleen 20 2.28% 5 5.00%
Rusty Russell 20 2.28% 3 3.00%
Ashok Raj 17 1.93% 2 2.00%
Tejun Heo 14 1.59% 2 2.00%
Don Zickus 13 1.48% 1 1.00%
Alok N Kataria 12 1.37% 1 1.00%
Vladislav Zolotarov 11 1.25% 1 1.00%
Andrew Morton 11 1.25% 3 3.00%
Tim Chen 10 1.14% 1 1.00%
Eric W. Biederman 10 1.14% 2 2.00%
Hidehiro Kawai 9 1.02% 1 1.00%
Hiroshi Shimamoto 5 0.57% 1 1.00%
Dou Liyang 5 0.57% 1 1.00%
Boris Ostrovsky 5 0.57% 2 2.00%
Konrad Rzeszutek Wilk 5 0.57% 1 1.00%
Miroslav Benes 4 0.46% 1 1.00%
Peter Zijlstra 4 0.46% 1 1.00%
H. Peter Anvin 3 0.34% 1 1.00%
Paul E. McKenney 3 0.34% 1 1.00%
Fernando Luis Vázquez Cao 3 0.34% 1 1.00%
Len Brown 3 0.34% 1 1.00%
Yanmin Zhang 3 0.34% 1 1.00%
Eduardo Pereira Habkost 3 0.34% 1 1.00%
Jaswinder Singh Rajput 3 0.34% 2 2.00%
James Cleverdon 2 0.23% 1 1.00%
Fenghua Yu 2 0.23% 1 1.00%
Rafael J. Wysocki 2 0.23% 1 1.00%
Prarit Bhargava 2 0.23% 1 1.00%
Chuck Ebbert 1 0.11% 1 1.00%
Alexey Y. Starikovskiy 1 0.11% 1 1.00%
Suresh B. Siddha 1 0.11% 1 1.00%
Greg Kroah-Hartman 1 0.11% 1 1.00%
Total 879 100.00% 100 100.00%


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#include <asm/cpumask.h>
#include <asm/current.h>
#include <asm/thread_info.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);

DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif
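
/*
 * Usage sketch, assuming the early_per_cpu() accessor from <asm/percpu.h>:
 * the EARLY_PER_CPU variables above stay readable even before the per-CPU
 * areas are fully set up, e.g.:
 *
 *	u16 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 */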

struct task_struct;

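/*
 * Hooks the platform (bare metal or a hypervisor guest such as Xen)
 * supplies for CPU bring-up/teardown, shutdown and cross-CPU IPIs;
 * the inline wrappers below dispatch through this table.
 */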
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};
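
/*
 * A sketch of how the bare-metal kernel populates this table, roughly
 * following arch/x86/kernel/smp.c (native_stop_other_cpus() and the
 * optional .crash_stop_other_cpus hook live in the .c files, not here;
 * paravirt guests install their own table instead):
 *
 *	struct smp_ops smp_ops = {
 *		.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
 *		.smp_prepare_cpus	= native_smp_prepare_cpus,
 *		.smp_cpus_done		= native_smp_cpus_done,
 *		.stop_other_cpus	= native_stop_other_cpus,
 *		.smp_send_reschedule	= native_smp_send_reschedule,
 *		.cpu_up			= native_cpu_up,
 *		.cpu_die		= native_cpu_die,
 *		.cpu_disable		= native_cpu_disable,
 *		.play_dead		= native_play_dead,
 *		.send_call_func_ipi	= native_send_call_func_ipi,
 *		.send_call_func_single_ipi = native_send_call_func_single_ipi,
 *	};
 */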

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	return smp_ops.cpu_up(cpu, tidle);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}
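
/*
 * The two hooks above back the generic cross-call API in kernel/smp.c.
 * A usage sketch (remote_hello and cpu are illustrative names):
 *
 *	static void remote_hello(void *info)
 *	{
 *		pr_info("hello from CPU%d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function_single(cpu, remote_hello, NULL, true);
 */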

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void smp_prepare_cpus_common(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
void cond_wakeup_cpu0(void);

void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);

void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);

asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
__visible void smp_call_function_interrupt(struct pt_regs *regs);
__visible void smp_call_function_single_interrupt(struct pt_regs *r);

#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)
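
/*
 * Usage sketch: translate a Linux CPU number into the identifiers that
 * hardware and firmware use:
 *
 *	u16 apicid = cpu_physical_id(cpu);	// local APIC ID
 *	u32 acpiid = cpu_acpi_id(cpu);		// ACPI processor UID
 */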

/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup.
 */
#define raw_smp_processor_id()  this_cpu_read(pcpu_hot.cpu_number)
#define __smp_processor_id() __this_cpu_read(pcpu_hot.cpu_number)

#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
#else
# define safe_smp_processor_id()	smp_processor_id()
#endif
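
/*
 * Usage sketch: the processor id is only stable while preemption is off;
 * get_cpu()/put_cpu() from <linux/smp.h> bracket such a region:
 *
 *	int cpu = get_cpu();
 *	pr_info("running on CPU%d\n", cpu);
 *	put_cpu();
 */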

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
{
	return per_cpu(cpu_l2c_shared_map, cpu);
}
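
/*
 * Usage sketch: walk the CPUs sharing the last level cache with @cpu:
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
 *		pr_info("CPU%d shares the LLC with CPU%d\n", cpu, sibling);
 */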

#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu)     wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return (struct cpumask *)cpumask_of(0);
}
#endif /* CONFIG_SMP */

extern unsigned disabled_cpus;

#ifdef CONFIG_X86_LOCAL_APIC
extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */
#define hard_smp_processor_id()	0
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */