Release 4.11 kernel/sched/autogroup.h
#ifdef CONFIG_SCHED_AUTOGROUP
#include <linux/kref.h>
#include <linux/rwsem.h>
#include <linux/sched/autogroup.h>
struct autogroup {
	/*
	 * The refcount does not track how many threads are currently
	 * attached to this autogroup; it counts how many tasks are
	 * still entitled to use it.
	 */
	struct kref kref;
	struct task_group *tg;		/* backing scheduler task group */
	struct rw_semaphore lock;	/* NOTE(review): presumably guards nice updates — confirm in autogroup.c */
	unsigned long id;		/* autogroup identifier (shown in /proc) — TODO confirm consumer */
	int nice;			/* nice level applied to the group */
};
extern void autogroup_init(struct task_struct *init_task);
extern void autogroup_free(struct task_group *tg);
/* Return true when @tg is backed by an autogroup. */
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return tg->autogroup != NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 9 | 47.37% | 1 | 33.33% |
Yong Zhang | 8 | 42.11% | 1 | 33.33% |
Mike Galbraith | 2 | 10.53% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
/*
 * Resolve the effective task group for @p: when autogrouping is enabled
 * and the task opts in, its signal's autogroup wins; otherwise the
 * caller-supplied @tg is kept.
 */
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
		return tg;

	return task_wants_autogroup(p, tg) ? p->signal->autogroup->tg : tg;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 23 | 46.00% | 1 | 20.00% |
Mike Galbraith | 15 | 30.00% | 1 | 20.00% |
Ingo Molnar | 9 | 18.00% | 1 | 20.00% |
Yong Zhang | 2 | 4.00% | 1 | 20.00% |
Jason Low | 1 | 2.00% | 1 | 20.00% |
Total | 50 | 100.00% | 5 | 100.00% |
extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
#else /* !CONFIG_SCHED_AUTOGROUP */
static inline void autogroup_init(struct task_struct *init_task) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mike Galbraith | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
static inline void autogroup_free(struct task_group *tg) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mike Galbraith | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
/*
 * Autogroup support compiled out: no task group is ever an autogroup.
 * Return the bool literal rather than the int 0 to match the declared
 * return type.
 */
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mike Galbraith | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
/* Autogroup support compiled out: every task keeps its given group. */
static inline struct task_group *
autogroup_task_group(struct task_struct *p,
		     struct task_group *tg)
{
	return tg;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mike Galbraith | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_SCHED_DEBUG
/* Autogroup support compiled out: no path to format, report zero length. */
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mike Galbraith | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
#endif
#endif /* CONFIG_SCHED_AUTOGROUP */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mike Galbraith | 138 | 54.98% | 2 | 28.57% |
Peter Zijlstra | 88 | 35.06% | 1 | 14.29% |
Ingo Molnar | 13 | 5.18% | 2 | 28.57% |
Yong Zhang | 11 | 4.38% | 1 | 14.29% |
Jason Low | 1 | 0.40% | 1 | 14.29% |
Total | 251 | 100.00% | 7 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.