Contributors: 13
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Oleg Nesterov |
85 |
45.21% |
3 |
14.29% |
Jens Axboe |
29 |
15.43% |
3 |
14.29% |
Roland McGrath |
17 |
9.04% |
3 |
14.29% |
Christopher Yeoh |
12 |
6.38% |
1 |
4.76% |
Waiman Long |
10 |
5.32% |
1 |
4.76% |
Frédéric Weisbecker |
9 |
4.79% |
2 |
9.52% |
Al Viro |
8 |
4.26% |
2 |
9.52% |
Eric W. Biederman |
7 |
3.72% |
1 |
4.76% |
Andrew Morton |
5 |
2.66% |
1 |
4.76% |
Sebastian Andrzej Siewior |
2 |
1.06% |
1 |
4.76% |
Adrian Bunk |
2 |
1.06% |
1 |
4.76% |
Ingo Molnar |
1 |
0.53% |
1 |
4.76% |
Greg Kroah-Hartman |
1 |
0.53% |
1 |
4.76% |
Total |
188 |
|
21 |
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TASK_WORK_H
#define _LINUX_TASK_WORK_H
#include <linux/list.h>
#include <linux/sched.h>
/* Signature of a task_work callback; invoked with the callback_head that
 * was queued via task_work_add(). */
typedef void (*task_work_func_t)(struct callback_head *);

/**
 * init_task_work - prepare a callback_head for task_work_add()
 * @twork: callback head to initialise (typically embedded in a larger object)
 * @func: function to run when the queued work is executed
 *
 * Only sets @twork->func; list linkage is established later by
 * task_work_add().
 */
static inline void
init_task_work(struct callback_head *twork, task_work_func_t func)
{
twork->func = func;
}
/*
 * How (and whether) the target task should be notified that new
 * task_work has been queued for it, passed as @mode to task_work_add().
 * Values above TWA_FLAGS are flag bits OR-ed into the mode, not modes
 * of their own.
 */
enum task_work_notify_mode {
TWA_NONE = 0,	/* queue the work without notifying the task */
TWA_RESUME,	/* NOTE(review): presumably notify on return to user mode — confirm in kernel/task_work.c */
TWA_SIGNAL,	/* signal-style notification of the target task */
TWA_SIGNAL_NO_IPI,	/* like TWA_SIGNAL but without sending an IPI — verify against callers */
TWA_NMI_CURRENT,	/* queue to the current task from NMI context */
TWA_FLAGS = 0xff00,	/* mask of the flag bits within the mode argument */
TWAF_NO_ALLOC = 0x0100,	/* NOTE(review): presumably forbids allocation on the queueing path — confirm */
};
static inline bool task_work_pending(struct task_struct *task)
{
return READ_ONCE(task->task_works);
}
/*
 * Queue @twork on @task, notifying it per @mode. Returns 0 on success,
 * non-zero on failure (e.g. the task is exiting) — see kernel/task_work.c
 * for the exact error codes.
 */
int task_work_add(struct task_struct *task, struct callback_head *twork,
enum task_work_notify_mode mode);
/*
 * Cancel a queued work selected by the @match predicate (called with
 * each pending callback_head and @data); returns the dequeued entry or
 * NULL if nothing matched.
 */
struct callback_head *task_work_cancel_match(struct task_struct *task,
bool (*match)(struct callback_head *, void *data), void *data);
/* Cancel a queued work whose callback equals the given function. */
struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
/* Cancel the specific queued entry @cb; returns true if it was dequeued. */
bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
/* Run and dequeue all pending task_work for the current task. */
void task_work_run(void);
/*
 * Flush remaining task_work at task-exit time. @task is unused here:
 * the body only calls task_work_run(), which operates on the current
 * task.
 */
static inline void exit_task_work(struct task_struct *task)
{
task_work_run();
}
#endif /* _LINUX_TASK_WORK_H */