Linux kernel release 4.12 — include/linux/cpuset.h (annotated with per-function contributor statistics)
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
* cpuset interface
*
* Copyright (C) 2003 BULL SA
* Copyright (C) 2004-2006 Silicon Graphics, Inc.
*
*/
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
extern struct static_key_false cpusets_enabled_key;
/*
 * cpusets_enabled - are any cpusets (beyond the root) in use?
 *
 * Implemented as a static branch (jump label) so the common
 * no-cpusets case costs a single patched no-op; the key is
 * raised/lowered by cpuset_inc()/cpuset_dec() below.
 */
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 15 | 93.75% | 1 | 50.00% |
Vlastimil Babka | 1 | 6.25% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
/*
 * nr_cpusets - number of cpusets currently defined.
 *
 * The static key's reference count tracks cpusets created in
 * addition to the always-present top-level cpuset, hence the "+ 1".
 */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
return static_key_count(&cpusets_enabled_key.key) + 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 19 | 90.48% | 1 | 50.00% |
Vlastimil Babka | 2 | 9.52% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
/*
 * cpuset_inc - account one more cpuset; enables the cpusets_enabled()
 * static branch when the count rises from zero.
 */
static inline void cpuset_inc(void)
{
static_branch_inc(&cpusets_enabled_key);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 14 | 93.33% | 1 | 50.00% |
Vlastimil Babka | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
/*
 * cpuset_dec - account one fewer cpuset; disables the cpusets_enabled()
 * static branch when the count drops back to zero.
 */
static inline void cpuset_dec(void)
{
static_branch_dec(&cpusets_enabled_key);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 14 | 93.33% | 1 | 50.00% |
Vlastimil Babka | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
/*
 * cpuset_node_allowed - may the current task allocate on @node?
 *
 * Fast path: with no cpusets active every node is allowed; otherwise
 * defer to the full check in __cpuset_node_allowed().
 */
static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return !cpusets_enabled() || __cpuset_node_allowed(node, gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 15 | 51.72% | 1 | 20.00% |
Vlastimil Babka | 9 | 31.03% | 1 | 20.00% |
Vladimir Davydov | 2 | 6.90% | 1 | 20.00% |
Paul Jackson | 2 | 6.90% | 1 | 20.00% |
Mel Gorman | 1 | 3.45% | 1 | 20.00% |
Total | 29 | 100.00% | 5 | 100.00% |
/*
 * __cpuset_zone_allowed - zone-level allowance check, expressed as the
 * node-level check applied to the zone's owning node.
 */
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	int nid = zone_to_nid(z);

	return __cpuset_node_allowed(nid, gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 19 | 73.08% | 1 | 33.33% |
David Rientjes | 4 | 15.38% | 1 | 33.33% |
Vlastimil Babka | 3 | 11.54% | 1 | 33.33% |
Total | 26 | 100.00% | 3 | 100.00% |
/*
 * cpuset_zone_allowed - may the current task allocate from zone @z?
 *
 * Skips the full check entirely when no cpusets are active.
 */
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return !cpusets_enabled() || __cpuset_zone_allowed(z, gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlastimil Babka | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2);
#define cpuset_memory_pressure_bump() \
do { \
if (cpuset_memory_pressure_enabled) \
__cpuset_memory_pressure_bump(); \
} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);
/*
 * cpuset_do_page_mem_spread - should current's page cache allocations
 * be spread over its allowed nodes?  Reads current's spread-page flag.
 */
static inline int cpuset_do_page_mem_spread(void)
{
return task_spread_page(current);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 12 | 80.00% | 1 | 50.00% |
Li Zefan | 3 | 20.00% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
/*
 * cpuset_do_slab_mem_spread - should current's slab allocations be
 * spread over its allowed nodes?  Reads current's spread-slab flag.
 */
static inline int cpuset_do_slab_mem_spread(void)
{
return task_spread_slab(current);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 12 | 80.00% | 1 | 50.00% |
Li Zefan | 3 | 20.00% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
extern int current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
extern void cpuset_print_current_mems_allowed(void);
/*
* read_mems_allowed_begin is required when making decisions involving
* mems_allowed such as during page allocation. mems_allowed can be updated in
* parallel and depending on the new value an operation can fail potentially
* causing process failure. A retry loop with read_mems_allowed_begin and
* read_mems_allowed_retry prevents these artificial failures.
*/
static inline unsigned int read_mems_allowed_begin(void)
{
if (!cpusets_enabled())
return 0;
return read_seqcount_begin(¤t->mems_allowed_seq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 19 | 67.86% | 3 | 75.00% |
Miao Xie | 9 | 32.14% | 1 | 25.00% |
Total | 28 | 100.00% | 4 | 100.00% |
/*
* If this returns true, the operation that took place after
* read_mems_allowed_begin may have failed artificially due to a concurrent
* update of mems_allowed. It is up to the caller to retry the operation if
* appropriate.
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
if (!cpusets_enabled())
return false;
return read_seqcount_retry(¤t->mems_allowed_seq, seq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 21 | 67.74% | 3 | 75.00% |
Miao Xie | 10 | 32.26% | 1 | 25.00% |
Total | 31 | 100.00% | 4 | 100.00% |
/*
 * set_mems_allowed - install a new mems_allowed nodemask for current.
 *
 * Write side of the mems_allowed seqcount: readers in
 * read_mems_allowed_begin()/read_mems_allowed_retry() observe either
 * the old or the new mask, never a torn update.  IRQs are disabled
 * across the write section so a reader running in interrupt context on
 * this CPU cannot spin forever, and task_lock() serialises updaters.
 *
 * Fixes source-extraction mangling: both "&current" occurrences had
 * been corrupted to '¤' + "t", which does not compile.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Miao Xie | 20 | 35.71% | 2 | 40.00% |
Mel Gorman | 16 | 28.57% | 1 | 20.00% |
John Stultz | 14 | 25.00% | 1 | 20.00% |
Paul Jackson | 6 | 10.71% | 1 | 20.00% |
Total | 56 | 100.00% | 5 | 100.00% |
#else /* !CONFIG_CPUSETS */
/* Without CONFIG_CPUSETS, cpusets can never be enabled. */
static inline bool cpusets_enabled(void)
{
	return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* No-op initialisation stub; always reports success. */
static inline int cpuset_init(void)
{
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* SMP-phase cpuset initialisation: nothing to do without CONFIG_CPUSETS. */
static inline void cpuset_init_smp(void) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 8 | 100.00% | 1 | 100.00% |
Total | 8 | 100.00% | 1 | 100.00% |
/*
 * Without cpusets there is a single sched domain partition spanning all
 * CPUs; rebuild it directly when the set of active CPUs changes.
 */
static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 17 | 94.44% | 1 | 50.00% |
Rakib Mullick | 1 | 5.56% | 1 | 50.00% |
Total | 18 | 100.00% | 2 | 100.00% |
/* Without cpusets every task may run on any possible CPU. */
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
cpumask_copy(mask, cpu_possible_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 11 | 45.83% | 1 | 25.00% |
Mike Travis | 6 | 25.00% | 1 | 25.00% |
Rusty Russell | 5 | 20.83% | 1 | 25.00% |
Li Zefan | 2 | 8.33% | 1 | 25.00% |
Total | 24 | 100.00% | 4 | 100.00% |
/* Fallback affinity handling is unnecessary without cpusets: no-op. */
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 9 | 81.82% | 1 | 50.00% |
Peter Zijlstra | 2 | 18.18% | 1 | 50.00% |
Total | 11 | 100.00% | 2 | 100.00% |
/* Without cpusets every task may allocate from any possible node. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
#define cpuset_current_mems_allowed (node_states[N_MEMORY])
/* No per-task mems_allowed state to initialise without cpusets. */
static inline void cpuset_init_current_mems_allowed(void) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 8 | 100.00% | 1 | 100.00% |
Total | 8 | 100.00% | 1 | 100.00% |
/* Any nodemask is acceptable when cpusets are not configured. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 11 | 78.57% | 1 | 50.00% |
Mel Gorman | 3 | 21.43% | 1 | 50.00% |
Total | 14 | 100.00% | 2 | 100.00% |
/* Allocation on any node is always allowed without cpusets. */
static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 11 | 68.75% | 1 | 33.33% |
Vlastimil Babka | 4 | 25.00% | 1 | 33.33% |
Vladimir Davydov | 1 | 6.25% | 1 | 33.33% |
Total | 16 | 100.00% | 3 | 100.00% |
/* Allocation from any zone is always allowed without cpusets. */
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlastimil Babka | 16 | 88.89% | 1 | 50.00% |
David Rientjes | 2 | 11.11% | 1 | 50.00% |
Total | 18 | 100.00% | 2 | 100.00% |
/* Allocation from any zone is always allowed without cpusets. */
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 14 | 77.78% | 2 | 40.00% |
Vlastimil Babka | 2 | 11.11% | 1 | 20.00% |
Al Viro | 1 | 5.56% | 1 | 20.00% |
Vladimir Davydov | 1 | 5.56% | 1 | 20.00% |
Total | 18 | 100.00% | 5 | 100.00% |
/* With a single implicit cpuset, any two tasks' mems always intersect. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2)
{
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 14 | 63.64% | 1 | 50.00% |
David Rientjes | 8 | 36.36% | 1 | 50.00% |
Total | 22 | 100.00% | 2 | 100.00% |
/* Memory-pressure accounting is a cpuset feature; no-op without it. */
static inline void cpuset_memory_pressure_bump(void) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 8 | 100.00% | 1 | 100.00% |
Total | 8 | 100.00% | 1 | 100.00% |
/* Nothing cpuset-related to report in the task status file: no-op. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 9 | 56.25% | 1 | 50.00% |
Eric W. Biedermann | 7 | 43.75% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
/* Without cpusets, page-spread placement always resolves to node 0. */
static inline int cpuset_mem_spread_node(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* Without cpusets, slab-spread placement always resolves to node 0. */
static inline int cpuset_slab_spread_node(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jack Steiner | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* Page cache spreading is never active without cpusets. */
static inline int cpuset_do_page_mem_spread(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* Slab spreading is never active without cpusets. */
static inline int cpuset_do_slab_mem_spread(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* There is no cpuset to rebind without CONFIG_CPUSETS. */
static inline int current_cpuset_is_being_rebound(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Menage | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* Rebuild the single, all-spanning sched domain partition. */
static inline void rebuild_sched_domains(void)
{
partition_sched_domains(1, NULL, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 18 | 100.00% | 2 | 100.00% |
Total | 18 | 100.00% | 2 | 100.00% |
/* No cpuset mems_allowed information to print: no-op. */
static inline void cpuset_print_current_mems_allowed(void)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 8 | 100.00% | 2 | 100.00% |
Total | 8 | 100.00% | 2 | 100.00% |
/* mems_allowed is fixed without cpusets; nothing to update. */
static inline void set_mems_allowed(nodemask_t nodemask)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Miao Xie | 9 | 100.00% | 1 | 100.00% |
Total | 9 | 100.00% | 1 | 100.00% |
/* mems_allowed never changes without cpusets, so no seqcount is taken. */
static inline unsigned int read_mems_allowed_begin(void) { return 0; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 8 | 61.54% | 2 | 66.67% |
Miao Xie | 5 | 38.46% | 1 | 33.33% |
Total | 13 | 100.00% | 3 | 100.00% |
/* A mems_allowed read section never needs retrying without cpusets. */
static inline bool read_mems_allowed_retry(unsigned int seq) { return false; }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 10 | 71.43% | 2 | 66.67% |
Miao Xie | 4 | 28.57% | 1 | 33.33% |
Total | 14 | 100.00% | 3 | 100.00% |
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Jackson | 324 | 36.61% | 11 | 25.00% |
Mel Gorman | 162 | 18.31% | 5 | 11.36% |
Vlastimil Babka | 72 | 8.14% | 1 | 2.27% |
David Rientjes | 66 | 7.46% | 4 | 9.09% |
Miao Xie | 60 | 6.78% | 2 | 4.55% |
Maksim Krasnyanskiy | 25 | 2.82% | 2 | 4.55% |
Li Zefan | 24 | 2.71% | 3 | 6.82% |
Tejun Heo | 23 | 2.60% | 1 | 2.27% |
Paul Menage | 20 | 2.26% | 1 | 2.27% |
Jack Steiner | 19 | 2.15% | 1 | 2.27% |
Oleg Nesterov | 18 | 2.03% | 1 | 2.27% |
John Stultz | 14 | 1.58% | 1 | 2.27% |
Al Viro | 13 | 1.47% | 2 | 4.55% |
Eric W. Biedermann | 13 | 1.47% | 1 | 2.27% |
Mike Travis | 10 | 1.13% | 1 | 2.27% |
Ingo Molnar | 6 | 0.68% | 2 | 4.55% |
Rusty Russell | 5 | 0.56% | 1 | 2.27% |
Vladimir Davydov | 5 | 0.56% | 1 | 2.27% |
Peter Zijlstra | 3 | 0.34% | 1 | 2.27% |
Rakib Mullick | 2 | 0.23% | 1 | 2.27% |
Lai Jiangshan | 1 | 0.11% | 1 | 2.27% |
Total | 885 | 100.00% | 44 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.