Contributors: 12

Author | Tokens | Token Proportion | Commits | Commit Proportion
--- | --- | --- | --- | ---
Oleg Nesterov | 85 | 47.75% | 3 | 15.00%
Jens Axboe | 29 | 16.29% | 3 | 15.00%
Roland McGrath | 17 | 9.55% | 3 | 15.00%
Christopher Yeoh | 12 | 6.74% | 1 | 5.00%
Frédéric Weisbecker | 9 | 5.06% | 2 | 10.00%
Al Viro | 8 | 4.49% | 2 | 10.00%
Eric W. Biederman | 7 | 3.93% | 1 | 5.00%
Andrew Morton | 5 | 2.81% | 1 | 5.00%
Adrian Bunk | 2 | 1.12% | 1 | 5.00%
Sebastian Andrzej Siewior | 2 | 1.12% | 1 | 5.00%
Ingo Molnar | 1 | 0.56% | 1 | 5.00%
Greg Kroah-Hartman | 1 | 0.56% | 1 | 5.00%
Total | 178 | 100.00% | 20 | 100.00%
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TASK_WORK_H
#define _LINUX_TASK_WORK_H

#include <linux/list.h>
#include <linux/sched.h>

typedef void (*task_work_func_t)(struct callback_head *);

static inline void
init_task_work(struct callback_head *twork, task_work_func_t func)
{
	twork->func = func;
}

/*
 * How the target task is told that work is pending:
 * TWA_NONE          - queue only, no notification
 * TWA_RESUME        - run on the task's next return to user mode
 * TWA_SIGNAL        - signal-style kick (interrupts interruptible waits),
 *                     may send an IPI
 * TWA_SIGNAL_NO_IPI - like TWA_SIGNAL, but without the IPI
 * TWA_NMI_CURRENT   - queue on the current task from NMI context
 */
enum task_work_notify_mode {
	TWA_NONE,
	TWA_RESUME,
	TWA_SIGNAL,
	TWA_SIGNAL_NO_IPI,
	TWA_NMI_CURRENT,
};

static inline bool task_work_pending(struct task_struct *task)
{
	return READ_ONCE(task->task_works);
}

int task_work_add(struct task_struct *task, struct callback_head *twork,
		  enum task_work_notify_mode mode);

struct callback_head *task_work_cancel_match(struct task_struct *task,
	bool (*match)(struct callback_head *, void *data), void *data);
struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
void task_work_run(void);

/* Called from the exit path to flush any works still queued on the task. */
static inline void exit_task_work(struct task_struct *task)
{
	task_work_run();
}

#endif /* _LINUX_TASK_WORK_H */
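
Since the header only declares the API, a usage sketch may help. The following is a minimal, hypothetical example: demo_work, demo_func and demo_queue are invented names, not from the kernel tree. A callback_head embedded in the caller's own structure is initialized with init_task_work(), queued on a task with task_work_add(TWA_RESUME), and the callback later runs in that task's own context when it next returns to user mode. Note that task_work_add() is not necessarily exported to loadable modules, so treat the module scaffolding as a compile frame; the pattern itself is how built-in users such as the deferred fput() path and io_uring drive task_work.

/*
 * Hypothetical usage sketch -- demo_work, demo_func and demo_queue are
 * invented names for illustration, not part of the kernel tree.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct demo_work {
	struct callback_head twork;	/* must outlive the queued callback */
	int payload;
};

/* Runs later, in the context of the task it was queued on. */
static void demo_func(struct callback_head *twork)
{
	struct demo_work *dw = container_of(twork, struct demo_work, twork);

	pr_info("task_work ran in %s/%d, payload=%d\n",
		current->comm, task_pid_nr(current), dw->payload);
	kfree(dw);
}

static int demo_queue(struct task_struct *task, int payload)
{
	struct demo_work *dw;
	int ret;

	dw = kzalloc(sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->payload = payload;
	init_task_work(&dw->twork, demo_func);

	/*
	 * TWA_RESUME: run on the task's next return to user mode.
	 * Fails with -ESRCH once the task has passed exit_task_work().
	 */
	ret = task_work_add(task, &dw->twork, TWA_RESUME);
	if (ret)
		kfree(dw);
	return ret;
}

static int __init demo_init(void)
{
	/* Queue a work item on the inserting process itself. */
	return demo_queue(current, 42);
}

static void __exit demo_exit(void)
{
	/* Reclaim a still-pending item, if the callback never got to run. */
	struct callback_head *cb = task_work_cancel_func(current, demo_func);

	if (cb)
		kfree(container_of(cb, struct demo_work, twork));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The design point worth noting: because the callback_head is embedded in the caller's structure, task_work itself allocates nothing, and cancellation hands the callback_head back so the owner can recover the enclosing object with container_of() and free it.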