Contributors: 15

Author                     Tokens  Token Proportion  Commits  Commit Proportion
Russell King                  123            33.79%       14             43.75%
Marc Zyngier                  109            29.95%        1              3.12%
Stephen Boyd                   38            10.44%        2              6.25%
Shawn Guo                      24             6.59%        2              6.25%
Jens Axboe                     14             3.85%        1              3.12%
Nico Pitre                     13             3.57%        1              3.12%
Linus Torvalds (pre-git)       11             3.02%        2              6.25%
Arnd Bergmann                   7             1.92%        1              3.12%
Yingjoe Chen                    7             1.92%        1              3.12%
Jonathan Austin                 5             1.37%        1              3.12%
Vladimir Murzin                 4             1.10%        1              3.12%
Catalin Marinas                 4             1.10%        1              3.12%
Masahiro Yamada                 2             0.55%        2              6.25%
Thomas Gleixner                 2             0.55%        1              3.12%
Ingo Molnar                     1             0.27%        1              3.12%
Total                         364                         32


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/smp.h
 *
 *  Copyright (C) 2004-2005 ARM Ltd.
 */
#ifndef __ASM_ARM_SMP_H
#define __ASM_ARM_SMP_H

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>

#ifndef CONFIG_SMP
# error "<asm/smp.h> included in non-SMP build"
#endif

#define raw_smp_processor_id() (current_thread_info()->cpu)

struct seq_file;

/*
 * Generate the IPI list text shown in /proc/interrupts.
 */
extern void show_ipi_list(struct seq_file *, int);

/*
 * Called from assembly code, this handles an IPI.
 */
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);

/*
 * Called from C code, this handles an IPI.
 */
void handle_IPI(int ipinr, struct pt_regs *regs);

/*
 * Set up the set of possible CPUs (via set_cpu_possible).
 */
extern void smp_init_cpus(void);


/*
 * Register the platform's function for raising an IPI cross call
 * on the CPUs in the given mask.
 */
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));

/*
 * Called from platform specific assembly code, this is the
 * secondary CPU entry point.
 */
asmlinkage void secondary_start_kernel(void);


/*
 * Initial data for bringing up a secondary CPU.
 */
struct secondary_data {
	union {
		struct mpu_rgn_info *mpu_rgn_info;
		u64 pgdir;
	};
	unsigned long swapper_pg_dir;
	void *stack;
};
extern struct secondary_data secondary_data;
extern void secondary_startup(void);
extern void secondary_startup_arm(void);

extern int __cpu_disable(void);

extern void __cpu_die(unsigned int cpu);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);

extern int register_ipi_completion(struct completion *completion, int cpu);

struct smp_operations {
#ifdef CONFIG_SMP
	/*
	 * Set up the set of possible CPUs (via set_cpu_possible).
	 */
	void (*smp_init_cpus)(void);
	/*
	 * Initialize the cpu_possible map and enable coherency.
	 */
	void (*smp_prepare_cpus)(unsigned int max_cpus);

	/*
	 * Perform platform specific initialisation of the specified CPU.
	 */
	void (*smp_secondary_init)(unsigned int cpu);
	/*
	 * Boot a secondary CPU, and assign it the specified idle task.
	 * This also gives us the initial stack to use for this CPU.
	 */
	int  (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
#ifdef CONFIG_HOTPLUG_CPU
	/* Called on another CPU to finish off the CPU being offlined */
	int  (*cpu_kill)(unsigned int cpu);
	/* Called on the dying CPU itself, e.g. to power it down */
	void (*cpu_die)(unsigned int cpu);
	/* Report whether the given CPU may be hotplugged off at all */
	bool  (*cpu_can_disable)(unsigned int cpu);
	/* Platform specific teardown on the outgoing CPU; non-zero aborts the unplug */
	int  (*cpu_disable)(unsigned int cpu);
#endif
#endif
};

struct of_cpu_method {
	const char *method;
	const struct smp_operations *ops;
};

#define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\
	static const struct of_cpu_method __cpu_method_of_table_##name	\
		__used __section(__cpu_method_of_table)			\
		= { .method = _method, .ops = _ops }
/*
 * Set the platform-specific SMP operations.
 */
extern void smp_set_ops(const struct smp_operations *);

#endif /* ifndef __ASM_ARM_SMP_H */
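
As an illustration of how the interfaces above fit together, here is a minimal
sketch of a platform plugging its SMP bring-up code into this header.
Everything platform-specific in it is an assumption made up for the example:
the "vendor,hypothetical-smp" enable-method string, the hyp_* names, the
four-core count and the hyp_soc_boot_cpu() firmware helper are invented, not
taken from any real machine. Only struct smp_operations,
CPU_METHOD_OF_DECLARE(), set_cpu_possible() and secondary_startup are the
real interfaces declared or referenced by this header.

/*
 * Illustrative sketch only: a hypothetical platform hooking into
 * <asm/smp.h>.  The hyp_* identifiers and the "vendor,hypothetical-smp"
 * enable-method are assumptions made up for this example.
 */
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <asm/memory.h>
#include <asm/smp.h>

/* Hypothetical firmware/SoC helper: power a core up at the given entry point. */
extern int hyp_soc_boot_cpu(unsigned int cpu, phys_addr_t entry);

static void __init hyp_smp_init_cpus(void)
{
	unsigned int cpu;

	/* This imaginary SoC always has four cores. */
	for (cpu = 0; cpu < 4; cpu++)
		set_cpu_possible(cpu, true);
}

static int hyp_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * secondary_data (stack, page tables) has already been filled in
	 * by the generic __cpu_up() path; all that is left is to point
	 * the core at the common ARM entry and release it.
	 */
	return hyp_soc_boot_cpu(cpu, virt_to_phys(secondary_startup));
}

static const struct smp_operations hyp_smp_ops __initconst = {
	.smp_init_cpus		= hyp_smp_init_cpus,
	.smp_boot_secondary	= hyp_smp_boot_secondary,
};

/*
 * Bind the ops to a device-tree enable-method; a cpu node with
 * enable-method = "vendor,hypothetical-smp" will select them.
 */
CPU_METHOD_OF_DECLARE(hyp_smp, "vendor,hypothetical-smp", &hyp_smp_ops);

Platforms that are not selected through a device-tree enable-method can
instead install the same structure explicitly, e.g. by calling
smp_set_ops(&hyp_smp_ops) from their early machine setup code.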