Contributors: 31

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Rusty Russell | 94 | 13.30% | 5 | 8.62% |
| David Daney | 68 | 9.62% | 2 | 3.45% |
| Linus Torvalds (pre-git) | 66 | 9.34% | 7 | 12.07% |
| Andrew Morton | 55 | 7.78% | 7 | 12.07% |
| Thomas Gleixner | 54 | 7.64% | 3 | 5.17% |
| Ingo Molnar | 49 | 6.93% | 5 | 8.62% |
| Jens Axboe | 44 | 6.22% | 1 | 1.72% |
| Rik Van Riel | 37 | 5.23% | 1 | 1.72% |
| Peter Zijlstra | 28 | 3.96% | 1 | 1.72% |
| Richard Weinberger | 25 | 3.54% | 1 | 1.72% |
| Juergen Gross | 25 | 3.54% | 1 | 1.72% |
| David Howells | 20 | 2.83% | 1 | 1.72% |
| Linus Torvalds | 17 | 2.40% | 4 | 6.90% |
| Américo Wang | 16 | 2.26% | 1 | 1.72% |
| Paul Gortmaker | 14 | 1.98% | 1 | 1.72% |
| Chuansheng Liu | 14 | 1.98% | 1 | 1.72% |
| Robert Love | 12 | 1.70% | 1 | 1.72% |
| Huang Ying | 11 | 1.56% | 1 | 1.72% |
| Con Kolivas | 11 | 1.56% | 1 | 1.72% |
| Takao Indoh | 8 | 1.13% | 1 | 1.72% |
| Christoph Hellwig | 7 | 0.99% | 1 | 1.72% |
| Mike Travis | 6 | 0.85% | 1 | 1.72% |
| Heiko Carstens | 6 | 0.85% | 2 | 3.45% |
| Andi Kleen | 5 | 0.71% | 1 | 1.72% |
| Shaohua Li | 4 | 0.57% | 1 | 1.72% |
| David S. Miller | 3 | 0.42% | 1 | 1.72% |
| Jan Kara | 3 | 0.42% | 1 | 1.72% |
| Paul Mackerras | 2 | 0.28% | 1 | 1.72% |
| Frédéric Weisbecker | 1 | 0.14% | 1 | 1.72% |
| Greg Kroah-Hartman | 1 | 0.14% | 1 | 1.72% |
| Rakib Mullick | 1 | 0.14% | 1 | 1.72% |
| Total | 707 | | 58 | |

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H
/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/llist.h>
typedef void (*smp_call_func_t)(void *info);
struct __call_single_data {
	struct llist_node llist;
	smp_call_func_t func;
	void *info;
	unsigned int flags;
};
/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
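/*
 * Hypothetical usage sketch (not part of this header): run a handler on
 * one specific CPU and wait for it to finish. The handler runs in IPI
 * context with interrupts disabled, so it must not sleep. target_cpu,
 * 'counter' and read_remote_counter() are assumed names for illustration.
 *
 *	static void read_remote_counter(void *info)
 *	{
 *		*(unsigned long *)info = __this_cpu_read(counter);
 *	}
 *
 *	unsigned long val;
 *	int err = smp_call_function_single(target_cpu, read_remote_counter,
 *					   &val, 1);
 */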
/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);
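/*
 * Hypothetical sketch: invalidate per-CPU state on every online CPU,
 * including the caller's, and wait until all handlers have run.
 * 'cache_valid' is an assumed per-cpu variable, not part of this header.
 *
 *	static void drop_local_cache(void *unused)
 *	{
 *		__this_cpu_write(cache_valid, false);
 *	}
 *
 *	on_each_cpu(drop_local_cache, NULL, 1);
 */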
/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);
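/*
 * Hypothetical sketch: run a handler only on the CPUs of one NUMA node
 * and wait for completion. cpumask_of_node() is a real kernel helper;
 * flush_node_state() and nid are assumed example names.
 *
 *	on_each_cpu_mask(cpumask_of_node(nid), flush_node_state, NULL, true);
 */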
/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		smp_call_func_t func, void *info, bool wait,
		gfp_t gfp_flags);
void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
		smp_call_func_t func, void *info, bool wait,
		gfp_t gfp_flags, const struct cpumask *mask);
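/*
 * Hypothetical sketch: flush only on CPUs that actually have dirty
 * per-CPU state. cpu_has_dirty_state(), flush_dirty_state() and
 * 'dirty_count' are assumed example names. gfp_flags is used to
 * allocate the intermediate cpumask; GFP_KERNEL is fine from
 * sleepable context.
 *
 *	static bool cpu_has_dirty_state(int cpu, void *info)
 *	{
 *		return per_cpu(dirty_count, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(cpu_has_dirty_state, flush_dirty_state,
 *			 NULL, true, GFP_KERNEL);
 */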
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
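/*
 * Hypothetical sketch: fire-and-forget a call to another CPU. The csd
 * must remain valid until the function has executed, which is why it is
 * typically static or embedded in a long-lived object; poke_cpu() is an
 * assumed example handler.
 *
 *	static call_single_data_t poke_csd = {
 *		.func = poke_cpu,
 *	};
 *
 *	smp_call_function_single_async(target_cpu, &poke_csd);
 */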
#ifdef CONFIG_SMP
#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>
/*
 * Main cross-CPU interfaces; these handle INIT, TLB flush, STOP, etc.
 * (defined in the asm header):
 */
/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);
/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);
/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);
/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);
/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
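/*
 * Hypothetical sketch: the _many variant broadcasts to the CPUs in a
 * mask, excluding the caller; the _any variant runs the function on
 * just one CPU from the mask, preferring the current or a nearby one,
 * and returns an error code. do_sync() is an assumed example handler.
 *
 *	smp_call_function_many(cpu_online_mask, do_sync, NULL, true);
 *	int err = smp_call_function_any(cpumask_of_node(nid), do_sync,
 *					NULL, 1);
 */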
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt
/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);
extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
extern int __boot_cpu_id;
static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}
#else /* !SMP */
static inline void smp_send_stop(void) { }
/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }
static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}
static inline void kick_all_cpus_sync(void) {  }
static inline void wake_up_all_idle_cpus(void) {  }
#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif
static inline int get_boot_cpu_id(void)
{
	return 0;
}
#endif /* !SMP */
/*
 * smp_processor_id(): get the current CPU ID.
 *
 * If DEBUG_PREEMPT is enabled, we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to silence false positives
 * (i.e. a use of smp_processor_id() that the debugging code reports
 * but which is legal for some reason). Don't use it to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif
#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()
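/*
 * Hypothetical sketch: get_cpu() disables preemption, so the returned
 * CPU id stays accurate until the matching put_cpu(); use this pattern
 * where bare smp_processor_id() would trip the DEBUG_PREEMPT check.
 * 'counter' is an assumed per-cpu variable.
 *
 *	int cpu = get_cpu();
 *	per_cpu(counter, cpu)++;
 *	put_cpu();
 */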
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);
extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);
void smp_setup_processor_id(void);
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
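/*
 * Hypothetical sketch: unlike the IPI-based helpers above,
 * smp_call_on_cpu() runs func in process context on a worker bound to
 * @cpu, so func may sleep and its return value is passed back; when
 * @phys is true the call is additionally pinned to the physical CPU
 * (relevant under hypervisors). read_msr_slow() and msr_data are
 * assumed example names.
 *
 *	int ret = smp_call_on_cpu(cpu, read_msr_slow, &msr_data, false);
 */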
/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
#endif /* __LINUX_SMP_H */