Contributors: 62
Author Tokens Token Proportion Commits Commit Proportion
Thomas Gleixner 221 18.51% 15 11.81%
Ashok Raj 64 5.36% 3 2.36%
Qais Yousef 62 5.19% 7 5.51%
Andrew Morton 58 4.86% 8 6.30%
Rafael J. Wysocki 56 4.69% 4 3.15%
Nicholas Piggin 53 4.44% 3 2.36%
Sudeep Holla 48 4.02% 4 3.15%
Andi Kleen 47 3.94% 4 3.15%
Rusty Russell 41 3.43% 7 5.51%
Kay Sievers 33 2.76% 2 1.57%
Christian Krafft 26 2.18% 1 0.79%
Josh Poimboeuf 26 2.18% 2 1.57%
Nathan Fontenot 25 2.09% 1 0.79%
Daniel Lezcano 24 2.01% 2 1.57%
Rik Van Riel 24 2.01% 1 0.79%
Liu Shuo 23 1.93% 1 0.79%
Toshi Kani 23 1.93% 1 0.79%
Mike Travis 22 1.84% 1 0.79%
Pawan Gupta 20 1.68% 3 2.36%
Sean Christopherson 18 1.51% 1 0.79%
Linus Torvalds (pre-git) 18 1.51% 4 3.15%
Gautham R. Shenoy 18 1.51% 3 2.36%
Vivek Goyal 18 1.51% 1 0.79%
Patrick Mochel 17 1.42% 2 1.57%
Peter Zijlstra 15 1.26% 3 2.36%
Oleg Nesterov 14 1.17% 1 0.79%
David S. Miller 13 1.09% 1 0.79%
Len Brown 11 0.92% 1 0.79%
Waiman Long 10 0.84% 1 0.79%
Chris Metcalf 8 0.67% 1 0.79%
Josh Triplett 8 0.67% 1 0.79%
Srivatsa S. Bhat 8 0.67% 1 0.79%
Andy Grover 8 0.67% 1 0.79%
Paul E. McKenney 8 0.67% 1 0.79%
Benjamin Herrenschmidt 8 0.67% 1 0.79%
Jiri Kosina 7 0.59% 1 0.79%
Russell King 7 0.59% 3 2.36%
Vineela Tummalapalli 6 0.50% 1 0.79%
James Morse 6 0.50% 2 1.57%
Tyler Hicks 6 0.50% 1 0.79%
Ben Hutchings 5 0.42% 1 0.79%
Nico Pitre 5 0.42% 1 0.79%
Ingo Molnar 5 0.42% 2 1.57%
Igor Mammedov 4 0.34% 1 0.79%
Greg Kroah-Hartman 4 0.34% 3 2.36%
Manfred Spraul 4 0.34% 1 0.79%
Konrad Rzeszutek Wilk 3 0.25% 1 0.79%
Alexandre Chartre 3 0.25% 1 0.79%
Kamezawa Hiroyuki 3 0.25% 1 0.79%
Michael Ellerman 3 0.25% 1 0.79%
Jonathan Corbet 3 0.25% 1 0.79%
Joe Perches 3 0.25% 1 0.79%
Anton Vorontsov 3 0.25% 1 0.79%
Borislav Petkov 3 0.25% 1 0.79%
Arnd Bergmann 3 0.25% 1 0.79%
Frédéric Weisbecker 3 0.25% 1 0.79%
Pavel Machek 3 0.25% 1 0.79%
Suresh B. Siddha 2 0.17% 1 0.79%
Nicolas Iooss 1 0.08% 1 0.79%
Robert P. J. Day 1 0.08% 1 0.79%
Guenter Roeck 1 0.08% 1 0.79%
Linus Torvalds 1 0.08% 1 0.79%
Total 1194 100.00% 127 100.00%


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the /sys/devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu_smt.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;
};
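
/*
 * Example (sketch only, hypothetical names): without
 * CONFIG_GENERIC_CPU_DEVICES an architecture typically keeps its own
 * per-CPU struct cpu and hands it to register_cpu() at boot, roughly:
 *
 *	static DEFINE_PER_CPU(struct cpu, arch_cpu);
 *
 *	static int __init arch_register_cpus(void)
 *	{
 *		int i;
 *
 *		for_each_present_cpu(i) {
 *			struct cpu *c = &per_cpu(arch_cpu, i);
 *
 *			c->hotpluggable = 1;	// only if the arch supports it
 *			register_cpu(c, i);
 *		}
 *		return 0;
 *	}
 *
 * With CONFIG_GENERIC_CPU_DEVICES the equivalent registration is done
 * generically by drivers/base/cpu.c using the per-CPU cpu_devices
 * instances declared further down.
 */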

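/*
 * Early bring-up hooks: boot_cpu_init(), boot_cpu_hotplug_init() and
 * trap_init() are called from start_kernel(); cpu_init() is the
 * architecture's per-CPU initialisation, also run when secondary CPUs
 * are brought up.
 */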
extern void boot_cpu_init(void);
extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread);

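/*
 * Add or remove an extra sysfs attribute / attribute group on every
 * registered CPU device.
 */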
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

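/*
 * Reporting hooks for the sysfs vulnerability files under
 * /sys/devices/system/cpu/vulnerabilities/.  Architectures override the
 * weak "Not affected" defaults provided by drivers/base/cpu.c.
 */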
extern ssize_t cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
			    struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
					struct device_attribute *attr,
					char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
				      struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
					struct device_attribute *attr,
					char *buf);
extern ssize_t cpu_show_retbleed(struct device *dev,
				 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
					     struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_gds(struct device *dev,
			    struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
					       struct device_attribute *attr, char *buf);

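/*
 * cpu_device_create() builds and registers a device under @parent
 * (normally a CPU device), naming it from the printf-style format; the
 * arch_* hooks below let architectures register, unregister and flag
 * hotpluggable CPU devices.
 */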
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...);
extern bool arch_cpu_is_hotpluggable(int cpu);
extern int arch_register_cpu(int cpu);
extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *buf, size_t count);
extern ssize_t arch_cpu_release(const char *buf, size_t count);
#endif

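/* Generic per-CPU struct cpu instances, registered at boot by drivers/base/cpu.c. */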
#ifdef CONFIG_GENERIC_CPU_DEVICES
DECLARE_PER_CPU(struct cpu, cpu_devices);
#endif

/*
 * These states are not related to the core CPU hotplug mechanism. They are
 * used by various (sub)architectures to track internal state.
 */
#define CPU_ONLINE		0x0002 /* CPU is up */
#define CPU_UP_PREPARE		0x0003 /* CPU coming up */
#define CPU_DEAD		0x0007 /* CPU dead */
#define CPU_DEAD_FROZEN		0x0008 /* CPU timed out on unplug */
#define CPU_POST_DEAD		0x0009 /* CPU successfully unplugged */
#define CPU_BROKEN		0x000B /* CPU did not die properly */

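/*
 * add_cpu()/cpu_device_up() bring a CPU online through the hotplug state
 * machine; notify_cpu_starting() runs on the incoming CPU early during
 * bring-up; cpu_maps_update_begin()/done() serialise updates to the CPU
 * present/online masks.
 */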
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
int add_cpu(unsigned int cpu);
int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
void bringup_nonboot_cpus(unsigned int max_cpus);

#else	/* CONFIG_SMP */
#define cpuhp_tasks_frozen	0

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

static inline int add_cpu(unsigned int cpu) { return 0; }

#endif /* CONFIG_SMP */
extern const struct bus_type cpu_subsys;

extern int lockdep_is_cpus_held(void);

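/*
 * CPU hotplug locking: cpus_read_lock()/cpus_read_unlock() keep the set of
 * online CPUs stable for the duration of the critical section, while the
 * hotplug core holds cpus_write_lock() when bringing CPUs up or down.
 * lockdep_assert_cpus_held() documents that the caller already holds the
 * lock; cpu_hotplug_disable()/cpu_hotplug_enable() temporarily forbid
 * hotplug operations altogether.
 */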
#ifdef CONFIG_HOTPLUG_CPU
extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
extern int  cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int remove_cpu(unsigned int cpu);
int cpu_device_down(struct device *dev);
extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);

#else /* CONFIG_HOTPLUG_CPU */

static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
static inline int  cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif	/* !CONFIG_HOTPLUG_CPU */

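/*
 * Scope-based guard (<linux/cleanup.h>): guard(cpus_read_lock)() takes
 * cpus_read_lock() and releases it automatically when the enclosing scope
 * is left.
 */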
DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())

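/*
 * Suspend/resume helpers: with CONFIG_PM_SLEEP_SMP_NONZERO_CPU the primary
 * CPU passed to freeze_secondary_cpus() is -1, letting the hotplug core
 * pick a suitable CPU to leave online instead of insisting on CPU 0.
 */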
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);

static inline int suspend_disable_secondary_cpus(void)
{
	int cpu = 0;

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
		cpu = -1;

	return freeze_secondary_cpus(cpu);
}
static inline void suspend_enable_secondary_cpus(void)
{
	return thaw_secondary_cpus();
}

#else /* !CONFIG_PM_SLEEP_SMP */
static inline void thaw_secondary_cpus(void) {}
static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */

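/*
 * Idle loop plumbing: cpu_startup_entry() enters the generic idle loop on
 * the current CPU; the arch_cpu_idle*() hooks implement the low-power wait
 * itself, and cpu_idle_poll_ctrl() forces polling idle on or off.
 */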
void __noreturn cpu_startup_entry(enum cpuhp_state state);

void cpu_idle_poll_ctrl(bool enable);

bool cpu_in_idle(unsigned long pc);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_tick_broadcast_enter(void);
void arch_tick_broadcast_exit(void);
void __noreturn arch_cpu_idle_dead(void);

#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
void arch_cpu_finalize_init(void);
#else
static inline void arch_cpu_finalize_init(void) { }
#endif

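/*
 * Idle injection: run the calling CPU's idle loop for the given duration
 * under the given wake-up latency constraint; play_idle() is the
 * microsecond convenience wrapper around play_idle_precise().
 */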
void play_idle_precise(u64 duration_ns, u64 latency_ns);

static inline void play_idle(unsigned long duration_us)
{
	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}

#ifdef CONFIG_HOTPLUG_CPU
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

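/*
 * Global "mitigations=" command line state: cpu_mitigations_off() is true
 * for "mitigations=off", cpu_mitigations_auto_nosmt() for
 * "mitigations=auto,nosmt".  Without CONFIG_CPU_MITIGATIONS the mitigations
 * are compiled out, hence the "off" stub returns true.
 */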
#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
#else
static inline bool cpu_mitigations_off(void)
{
	return true;
}
static inline bool cpu_mitigations_auto_nosmt(void)
{
	return false;
}
#endif

#endif /* _LINUX_CPU_H_ */