Contributors: 21
Author                    Tokens  Token Proportion  Commits  Commit Proportion
Ralf Baechle                 182            46.79%        9             23.08%
Paul Burton                   46            11.83%        2              5.13%
Linus Torvalds                23             5.91%        1              2.56%
Linus Torvalds (pre-git)      21             5.40%        4             10.26%
Jens Axboe                    19             4.88%        1              2.56%
Dengcheng Zhu                 17             4.37%        1              2.56%
Matt Redfearn                 15             3.86%        2              5.13%
Andrew Morton                 14             3.60%        3              7.69%
Rusty Russell                 13             3.34%        1              2.56%
Jiaxun Yang                    7             1.80%        2              5.13%
Arnd Bergmann                  7             1.80%        1              2.56%
David Daney                    6             1.54%        2              5.13%
Huacai Chen                    5             1.29%        1              2.56%
James Hogan                    4             1.03%        2              5.13%
Markos Chandras                4             1.03%        1              2.56%
Baoquan He                     1             0.26%        1              2.56%
Ingo Molnar                    1             0.26%        1              2.56%
Liangliang Huang               1             0.26%        1              2.56%
Josh Poimboeuf                 1             0.26%        1              2.56%
Valentin Schneider             1             0.26%        1              2.56%
Joe Perches                    1             0.26%        1              2.56%
Total                        389                         39


/*
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002 Ralf Baechle
 * Copyright (C) 2000, 2001 Broadcom Corporation
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/cpumask.h>

#include <asm/smp-ops.h>

extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];

static inline int raw_smp_processor_id(void)
{
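	/*
	 * The VDSO must never use smp_processor_id(): the branch below
	 * resolves such calls to a deliberately undefined helper whose
	 * __compiletime_error attribute turns them into build errors.
	 */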
#if defined(__VDSO__)
	extern int vdso_smp_processor_id(void)
		__compiletime_error("VDSO should not call smp_processor_id()");
	return vdso_smp_processor_id();
#else
	return current_thread_info()->cpu;
#endif
}
#define raw_smp_processor_id raw_smp_processor_id

/*
 * Map from physical cpu id to sequential logical cpu number.  This map is
 * the identity mapping unless some cpus failed to come on-line.
 */
extern int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
#define cpu_number_map(cpu)  __cpu_number_map[cpu]

/* The reverse map from sequential logical cpu number to cpu id.  */
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
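
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API): the
 * two maps above are inverses of each other, so translating a logical cpu
 * number to a physical id and back yields the original value for every cpu
 * that successfully came on-line.
 */
static inline int __example_cpu_map_roundtrip(int logical_cpu)
{
	int physical_id = cpu_logical_map(logical_cpu);

	return cpu_number_map(physical_id) == logical_cpu;
}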

#define NO_PROC_ID	(-1)

#define SMP_RESCHEDULE_YOURSELF 0x1	/* XXX braindead */
#define SMP_CALL_FUNCTION	0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH	0x4
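
/*
 * Illustrative sketch of how a platform's IPI handler might dispatch on the
 * action bits above (hypothetical; the real handlers live in the platform's
 * plat_smp_ops implementation and in arch/mips/kernel/smp.c):
 *
 *	if (action & SMP_RESCHEDULE_YOURSELF)
 *		scheduler_ipi();
 *	if (action & SMP_CALL_FUNCTION)
 *		generic_smp_call_function_interrupt();
 */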

/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;

extern unsigned int smp_max_threads __initdata;

extern asmlinkage void smp_bootstrap(void);

extern void calculate_cpu_foreign_map(void);

asmlinkage void start_secondary(void);

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static inline void arch_smp_send_reschedule(int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
}

#ifdef CONFIG_HOTPLUG_CPU
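/*
 * CPU hotplug helpers: __cpu_disable() runs on the CPU that is going down,
 * __cpu_die() is called from a surviving CPU to wait for the victim to finish
 * dying, and play_dead() is entered from the idle loop of the dead CPU.
 */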
static inline int __cpu_disable(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->cpu_die(cpu);
}

extern void __noreturn play_dead(void);
#endif

#ifdef CONFIG_KEXEC_CORE
static inline void kexec_nonboot_cpu(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->kexec_nonboot_cpu();
}

static inline void *kexec_nonboot_cpu_func(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->kexec_nonboot_cpu;
}
#endif

/*
 * This function will set up the necessary IPIs for Linux to communicate
 * with the CPUs in mask.
 * Return 0 on success.
 */
int mips_smp_ipi_allocate(const struct cpumask *mask);

/*
 * This function will free up the IPIs allocated with mips_smp_ipi_allocate()
 * for the CPUs in mask, which must be a subset of the CPUs for which IPIs
 * have been configured.
 * Return 0 on success.
 */
int mips_smp_ipi_free(const struct cpumask *mask);
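
/*
 * Illustrative usage sketch (hypothetical function, not part of this header):
 * a platform that brings a group of CPUs up late could allocate IPIs for just
 * those CPUs and release them again when the group is powered back down.
 */
static inline int __example_ipi_lifetime(const struct cpumask *late_cpus)
{
	int err;

	err = mips_smp_ipi_allocate(late_cpus);
	if (err)
		return err;

	/* ... the CPUs in late_cpus can now be targeted by IPIs ... */

	return mips_smp_ipi_free(late_cpus);
}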

static inline void arch_send_call_function_single_ipi(int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}

#endif /* __ASM_SMP_H */