Contributors: 24

Author              | Tokens | Token Proportion | Commits | Commit Proportion
Paul Jackson        |    324 |           35.26% |      11 |            22.92%
Mel Gorman          |    139 |           15.13% |       5 |            10.42%
Vlastimil Babka     |     70 |            7.62% |       1 |             2.08%
David Rientjes      |     66 |            7.18% |       4 |             8.33%
Miao Xie            |     60 |            6.53% |       2 |             4.17%
Peter Zijlstra      |     33 |            3.59% |       2 |             4.17%
Dima Zavin          |     28 |            3.05% |       1 |             2.08%
Maksim Krasnyanskiy |     25 |            2.72% |       2 |             4.17%
Li Zefan            |     24 |            2.61% |       3 |             6.25%
Tejun Heo           |     23 |            2.50% |       1 |             2.08%
Jack Steiner        |     19 |            2.07% |       1 |             2.08%
Oleg Nesterov       |     18 |            1.96% |       1 |             2.08%
Paul Menage         |     17 |            1.85% |       1 |             2.08%
John Stultz         |     14 |            1.52% |       1 |             2.08%
Eric W. Biederman   |     13 |            1.41% |       1 |             2.08%
Al Viro             |     13 |            1.41% |       2 |             4.17%
Mike Travis         |     10 |            1.09% |       1 |             2.08%
Ingo Molnar         |      6 |            0.65% |       2 |             4.17%
Vladimir Davydov    |      5 |            0.54% |       1 |             2.08%
Rusty Russell       |      5 |            0.54% |       1 |             2.08%
Yaowei Bai          |      3 |            0.33% |       1 |             2.08%
Rakib Mullick       |      2 |            0.22% |       1 |             2.08%
Greg Kroah-Hartman  |      1 |            0.11% |       1 |             2.08%
Lai Jiangshan       |      1 |            0.11% |       1 |             2.08%
Total               |    919 |                  |      48 |

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
* cpuset interface
*
* Copyright (C) 2003 BULL SA
* Copyright (C) 2004-2006 Silicon Graphics, Inc.
*
*/
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
/*
* Static branch rewrites can happen in an arbitrary order for a given
* key. In code paths where we need to loop with read_mems_allowed_begin() and
* read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
* to ensure that begin() always gets rewritten before retry() in the
* disabled -> enabled transition. If not, then if local irqs are disabled
* around the loop, we can deadlock since retry() would always be
* comparing the latest value of the mems_allowed seqcount against 0 as
* begin() still would see cpusets_enabled() as false. The enabled -> disabled
* transition should happen in reverse order for the same reasons (want to stop
* looking at real value of mems_allowed.sequence in retry() first).
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
}
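/*
 * cpuset_inc()/cpuset_dec() follow the ordering described above:
 * cpusets_pre_enable_key is switched on first and off last, so
 * read_mems_allowed_begin() starts sampling the seqcount before
 * read_mems_allowed_retry() starts comparing against it, and only stops
 * doing so after retry() has gone back to returning false.
 */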
static inline void cpuset_inc(void)
{
static_branch_inc(&cpusets_pre_enable_key);
static_branch_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
static_branch_dec(&cpusets_enabled_key);
static_branch_dec(&cpusets_pre_enable_key);
}
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
if (cpusets_enabled())
return __cpuset_node_allowed(node, gfp_mask);
return true;
}
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
if (cpusets_enabled())
return __cpuset_zone_allowed(z, gfp_mask);
return true;
}
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2);
#define cpuset_memory_pressure_bump() \
do { \
if (cpuset_memory_pressure_enabled) \
__cpuset_memory_pressure_bump(); \
} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
return task_spread_page(current);
}
static inline int cpuset_do_slab_mem_spread(void)
{
return task_spread_slab(current);
}
extern bool current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
extern void cpuset_print_current_mems_allowed(void);
/*
* read_mems_allowed_begin is required when making decisions involving
* mems_allowed such as during page allocation. mems_allowed can be updated in
* parallel and depending on the new value an operation can fail potentially
* causing process failure. A retry loop with read_mems_allowed_begin and
* read_mems_allowed_retry prevents these artificial failures.
*/
static inline unsigned int read_mems_allowed_begin(void)
{
if (!static_branch_unlikely(&cpusets_pre_enable_key))
return 0;
	return read_seqcount_begin(&current->mems_allowed_seq);
}
/*
* If this returns true, the operation that took place after
* read_mems_allowed_begin may have failed artificially due to a concurrent
* update of mems_allowed. It is up to the caller to retry the operation if
* appropriate.
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
if (!static_branch_unlikely(&cpusets_enabled_key))
return false;
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
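/*
 * Illustrative sketch, not part of the upstream header: a typical caller
 * wraps the operation that consults mems_allowed in a begin/retry loop, so
 * that a concurrent cpuset update leads to a retry rather than a spurious
 * failure. The function name is hypothetical, and alloc_pages() merely
 * stands in for whatever operation depends on mems_allowed.
 */
static inline struct page *cpuset_retry_alloc_sketch(gfp_t gfp, unsigned int order)
{
	struct page *page;
	unsigned int cookie;

	do {
		/* Snapshot the mems_allowed seqcount (0 when cpusets are off). */
		cookie = read_mems_allowed_begin();
		page = alloc_pages(gfp, order);
		/* Retry only if the operation failed and mems_allowed changed. */
	} while (!page && read_mems_allowed_retry(cookie));

	return page;
}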
static inline void set_mems_allowed(nodemask_t nodemask)
{
unsigned long flags;
task_lock(current);
local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
local_irq_restore(flags);
task_unlock(current);
}
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
static inline void cpuset_force_rebuild(void) { }
static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
static inline void cpuset_wait_for_hotplug(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
cpumask_copy(mask, cpu_possible_mask);
}
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
}
#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
return 1;
}
static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
return true;
}
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
}
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
}
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2)
{
return 1;
}
static inline void cpuset_memory_pressure_bump(void) {}
static inline void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task)
{
}
static inline int cpuset_mem_spread_node(void)
{
return 0;
}
static inline int cpuset_slab_spread_node(void)
{
return 0;
}
static inline int cpuset_do_page_mem_spread(void)
{
return 0;
}
static inline int cpuset_do_slab_mem_spread(void)
{
return 0;
}
static inline bool current_cpuset_is_being_rebound(void)
{
return false;
}
static inline void rebuild_sched_domains(void)
{
partition_sched_domains(1, NULL, NULL);
}
static inline void cpuset_print_current_mems_allowed(void)
{
}
static inline void set_mems_allowed(nodemask_t nodemask)
{
}
static inline unsigned int read_mems_allowed_begin(void)
{
return 0;
}
static inline bool read_mems_allowed_retry(unsigned int seq)
{
return false;
}
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */