Contributors: 59

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Thomas Gleixner | 181 | 18.47% | 12 | 10.34% |
| Nicholas Piggin | 53 | 5.41% | 3 | 2.59% |
| Andrew Morton | 51 | 5.20% | 8 | 6.90% |
| Sudeep Holla | 48 | 4.90% | 4 | 3.45% |
| Andi Kleen | 47 | 4.80% | 4 | 3.45% |
| Ashok Raj | 43 | 4.39% | 3 | 2.59% |
| Kay Sievers | 33 | 3.37% | 2 | 1.72% |
| Qais Yousef | 32 | 3.27% | 6 | 5.17% |
| Rafael J. Wysocki | 31 | 3.16% | 4 | 3.45% |
| Rusty Russell | 30 | 3.06% | 5 | 4.31% |
| Christian Krafft | 26 | 2.65% | 1 | 0.86% |
| Nathan Fontenot | 25 | 2.55% | 1 | 0.86% |
| Rik Van Riel | 24 | 2.45% | 1 | 0.86% |
| Daniel Lezcano | 24 | 2.45% | 2 | 1.72% |
| Josh Poimboeuf | 23 | 2.35% | 2 | 1.72% |
| Mike Travis | 22 | 2.24% | 1 | 0.86% |
| Pawan Gupta | 20 | 2.04% | 3 | 2.59% |
| Vivek Goyal | 18 | 1.84% | 1 | 0.86% |
| Sean Christopherson | 18 | 1.84% | 1 | 0.86% |
| Gautham R. Shenoy | 17 | 1.73% | 2 | 1.72% |
| Linus Torvalds (pre-git) | 14 | 1.43% | 4 | 3.45% |
| David S. Miller | 13 | 1.33% | 1 | 0.86% |
| Oleg Nesterov | 12 | 1.22% | 1 | 0.86% |
| Patrick Mochel | 12 | 1.22% | 2 | 1.72% |
| Len Brown | 11 | 1.12% | 1 | 0.86% |
| Benjamin Herrenschmidt | 8 | 0.82% | 1 | 0.86% |
| Josh Triplett | 8 | 0.82% | 1 | 0.86% |
| Paul E. McKenney | 8 | 0.82% | 1 | 0.86% |
| Liu Shuo | 8 | 0.82% | 1 | 0.86% |
| Chris Metcalf | 8 | 0.82% | 1 | 0.86% |
| Andy Grover | 8 | 0.82% | 1 | 0.86% |
| Russell King | 7 | 0.71% | 3 | 2.59% |
| Peter Zijlstra | 7 | 0.71% | 2 | 1.72% |
| Jiri Kosina | 7 | 0.71% | 1 | 0.86% |
| Vineela Tummalapalli | 6 | 0.61% | 1 | 0.86% |
| James Morse | 6 | 0.61% | 2 | 1.72% |
| Ingo Molnar | 5 | 0.51% | 2 | 1.72% |
| Toshi Kani | 5 | 0.51% | 1 | 0.86% |
| Ben Hutchings | 5 | 0.51% | 1 | 0.86% |
| Nico Pitre | 5 | 0.51% | 1 | 0.86% |
| Tyler Hicks | 5 | 0.51% | 1 | 0.86% |
| Igor Mammedov | 4 | 0.41% | 1 | 0.86% |
| Greg Kroah-Hartman | 4 | 0.41% | 3 | 2.59% |
| Konrad Rzeszutek Wilk | 3 | 0.31% | 1 | 0.86% |
| Borislav Petkov | 3 | 0.31% | 1 | 0.86% |
| Tony Luck | 3 | 0.31% | 1 | 0.86% |
| Kamezawa Hiroyuki | 3 | 0.31% | 1 | 0.86% |
| Arnd Bergmann | 3 | 0.31% | 1 | 0.86% |
| Michael Ellerman | 3 | 0.31% | 1 | 0.86% |
| Jonathan Corbet | 3 | 0.31% | 1 | 0.86% |
| Joe Perches | 3 | 0.31% | 1 | 0.86% |
| Alexandre Chartre | 3 | 0.31% | 1 | 0.86% |
| Pavel Machek | 3 | 0.31% | 1 | 0.86% |
| Suresh B. Siddha | 2 | 0.20% | 1 | 0.86% |
| Manfred Spraul | 2 | 0.20% | 1 | 0.86% |
| Guenter Roeck | 1 | 0.10% | 1 | 0.86% |
| Linus Torvalds | 1 | 0.10% | 1 | 0.86% |
| Robert P. J. Day | 1 | 0.10% | 1 | 0.86% |
| Nicolas Iooss | 1 | 0.10% | 1 | 0.86% |
| Total | 980 | | 116 | |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/cpu.h - generic cpu definition
*
* This is mainly for topological representation. We define the
* basic 'struct cpu' here, which can be embedded in per-arch
* definitions of processors.
*
* Basic handling of the devices is done in drivers/base/cpu.c
*
* CPUs are exported via sysfs in the devices/system/cpu
* directory.
*/
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpuhplock.h>
#include <linux/cpu_smt.h>
struct device;
struct device_node;
struct attribute_group;
struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;
};
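/*
 * Illustrative note, not part of the upstream header: as the file comment
 * above says, architectures typically embed 'struct cpu' in their own
 * per-CPU bookkeeping and later hand the embedded member to register_cpu(),
 * e.g. (names hypothetical):
 *
 *	struct myarch_cpu {
 *		struct cpu	cpu;
 *		u32		hwid;
 *	};
 */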
extern void boot_cpu_init(void);
extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
		int cpu, unsigned int *thread);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
extern ssize_t cpu_show_meltdown(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
		struct device_attribute *attr,
		char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
		struct device_attribute *attr,
		char *buf);
extern ssize_t cpu_show_retbleed(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_gds(struct device *dev,
		struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
		struct device_attribute *attr, char *buf);
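/*
 * Illustrative sketch, not part of the upstream header: drivers/base/cpu.c
 * supplies weak "Not affected" defaults for the cpu_show_*() hooks above,
 * and an architecture overrides the ones it knows about, roughly like:
 *
 *	ssize_t cpu_show_meltdown(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "Mitigation: PTI\n");
 *	}
 *
 * The reported string is the architecture's choice; "Mitigation: PTI" is
 * only an example.
 */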
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
		const struct attribute_group **groups,
		const char *fmt, ...);
extern bool arch_cpu_is_hotpluggable(int cpu);
extern int arch_register_cpu(int cpu);
extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
#ifdef CONFIG_GENERIC_CPU_DEVICES
DECLARE_PER_CPU(struct cpu, cpu_devices);
#endif
/*
* These states are not related to the core CPU hotplug mechanism. They are
* used by various (sub)architectures to track internal state
*/
#define CPU_ONLINE		0x0002 /* CPU is up */
#define CPU_UP_PREPARE		0x0003 /* CPU coming up */
#define CPU_DEAD		0x0007 /* CPU dead */
#define CPU_DEAD_FROZEN		0x0008 /* CPU timed out on unplug */
#define CPU_POST_DEAD		0x0009 /* CPU successfully unplugged */
#define CPU_BROKEN		0x000B /* CPU did not die properly */
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
int add_cpu(unsigned int cpu);
int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
void bringup_nonboot_cpus(unsigned int max_cpus);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
static inline void cpu_maps_update_begin(void)
{
}
static inline void cpu_maps_update_done(void)
{
}
static inline int add_cpu(unsigned int cpu) { return 0; }
#endif /* CONFIG_SMP */
extern const struct bus_type cpu_subsys;
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);
static inline int suspend_disable_secondary_cpus(void)
{
	int cpu = 0;
	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
		cpu = -1;
	return freeze_secondary_cpus(cpu);
}
static inline void suspend_enable_secondary_cpus(void)
{
	return thaw_secondary_cpus();
}
#else /* !CONFIG_PM_SLEEP_SMP */
static inline void thaw_secondary_cpus(void) {}
static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
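/*
 * Illustrative note, not part of the upstream header: the suspend and
 * hibernation core conceptually brackets its single-CPU phase with the
 * helpers above:
 *
 *	error = suspend_disable_secondary_cpus();
 *	if (!error) {
 *		... enter the low-power state or write the image ...
 *	}
 *	suspend_enable_secondary_cpus();
 */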
void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_tick_broadcast_enter(void);
void arch_tick_broadcast_exit(void);
void __noreturn arch_cpu_idle_dead(void);
#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
void arch_cpu_finalize_init(void);
#else
static inline void arch_cpu_finalize_init(void) { }
#endif
void play_idle_precise(u64 duration_ns, u64 latency_ns);
static inline void play_idle(unsigned long duration_us)
{
	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}
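/*
 * Illustrative usage, not part of the upstream header: a caller that wants
 * to inject roughly one millisecond of forced idle time can do
 *
 *	play_idle(1000);	(duration in microseconds)
 *
 * which is play_idle_precise(1000 * NSEC_PER_USEC, U64_MAX), i.e. no
 * latency constraint.
 */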
#ifdef CONFIG_HOTPLUG_CPU
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
#else
static inline bool cpu_mitigations_off(void)
{
	return true;
}
static inline bool cpu_mitigations_auto_nosmt(void)
{
	return false;
}
#endif
#endif /* _LINUX_CPU_H_ */
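
For context, here is a minimal sketch of how an architecture that does not use CONFIG_GENERIC_CPU_DEVICES might register its CPUs at boot so they appear under /sys/devices/system/cpu/. The per-CPU variable demo_cpu_devices and the initcall demo_topology_init are hypothetical names, not taken from any real architecture; the relevant pieces from this header are struct cpu and register_cpu().

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>

/* One CPU device structure per possible CPU (hypothetical example). */
static DEFINE_PER_CPU(struct cpu, demo_cpu_devices);

static int __init demo_topology_init(void)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		struct cpu *c = &per_cpu(demo_cpu_devices, cpu);

		/* Offer a sysfs hotplug control file for secondary CPUs only. */
		c->hotpluggable = (cpu != 0);
		register_cpu(c, cpu);
	}
	return 0;
}
subsys_initcall(demo_topology_init);

register_cpu() fills in the device's node_id and adds it to the cpu_subsys bus, after which get_cpu_device() and cpu_is_hotpluggable() from this header operate on it.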