Release 4.10 kernel/sched/auto_group.h
#ifdef CONFIG_SCHED_AUTOGROUP
#include <linux/kref.h>
#include <linux/rwsem.h>
struct autogroup {
	/*
	 * This reference count does not track how many threads are
	 * attached to the autogroup right now; it counts how many
	 * tasks are still entitled to use this autogroup.
	 */
	struct kref		kref;
	struct task_group	*tg;	/* backing task group used for scheduling */
	struct rw_semaphore	lock;	/* NOTE(review): presumably serializes autogroup updates — confirm at call sites */
	unsigned long		id;	/* numeric autogroup identifier */
	int			nice;	/* nice level applied to the group */
};
extern void autogroup_init(struct task_struct *init_task);
extern void autogroup_free(struct task_group *tg);
/* A task_group is an autogroup iff it carries an autogroup back-pointer. */
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return tg->autogroup ? true : false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| peter zijlstra | 9 | 47.37% | 1 | 33.33% |
| yong zhang | 8 | 42.11% | 1 | 33.33% |
| mike galbraith | 2 | 10.53% | 1 | 33.33% |
| Total | 19 | 100.00% | 3 | 100.00% |
extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
/*
 * Resolve the effective task_group for @p: when autogroups are enabled
 * and the task opts in, redirect to its signal's autogroup task_group;
 * otherwise keep the group the caller passed in.
 */
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
		return tg;

	if (task_wants_autogroup(p, tg))
		return p->signal->autogroup->tg;

	return tg;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| peter zijlstra | 23 | 46.00% | 1 | 20.00% |
| mike galbraith | 15 | 30.00% | 1 | 20.00% |
| ingo molnar | 9 | 18.00% | 1 | 20.00% |
| yong zhang | 2 | 4.00% | 1 | 20.00% |
| jason low | 1 | 2.00% | 1 | 20.00% |
| Total | 50 | 100.00% | 5 | 100.00% |
extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
#else /* !CONFIG_SCHED_AUTOGROUP */
/* No-op stub used when CONFIG_SCHED_AUTOGROUP is compiled out. */
static inline void autogroup_init(struct task_struct *init_task) { }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mike galbraith | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
/* No-op stub used when CONFIG_SCHED_AUTOGROUP is compiled out. */
static inline void autogroup_free(struct task_group *tg) { }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mike galbraith | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
/*
 * Without CONFIG_SCHED_AUTOGROUP no task_group can ever be an autogroup.
 * Return the bool literal false rather than the integer 0 to match the
 * function's bool return type.
 */
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mike galbraith | 15 | 100.00% | 1 | 100.00% |
| Total | 15 | 100.00% | 1 | 100.00% |
/*
 * Without autogroups a task's effective task_group is always exactly
 * the one the caller passed in.
 */
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
	return tg;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mike galbraith | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_SCHED_DEBUG
/* Stub: writes nothing into buf and reports a zero-length path. */
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mike galbraith | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
#endif
#endif /* CONFIG_SCHED_AUTOGROUP */
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| mike galbraith | 138 | 55.65% | 2 | 33.33% |
| peter zijlstra | 88 | 35.48% | 1 | 16.67% |
| yong zhang | 11 | 4.44% | 1 | 16.67% |
| ingo molnar | 10 | 4.03% | 1 | 16.67% |
| jason low | 1 | 0.40% | 1 | 16.67% |
| Total | 248 | 100.00% | 6 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.