Contributors: 13

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
| --- | --- | --- | --- | --- |
| Jens Axboe | 259 | 60.09% | 7 | 23.33% |
| Anuj Gupta | 53 | 12.30% | 2 | 6.67% |
| Pavel Begunkov | 46 | 10.67% | 7 | 23.33% |
| Dylan Yudaken | 24 | 5.57% | 1 | 3.33% |
| Thomas Gleixner | 13 | 3.02% | 3 | 10.00% |
| Kanchan Joshi | 9 | 2.09% | 1 | 3.33% |
| Hao Xu | 9 | 2.09% | 3 | 10.00% |
| FUJITA Tomonori | 7 | 1.62% | 1 | 3.33% |
| Andrew Lutomirski | 6 | 1.39% | 1 | 3.33% |
| Clark Williams | 2 | 0.46% | 1 | 3.33% |
| David Mosberger-Tang | 1 | 0.23% | 1 | 3.33% |
| Geert Uytterhoeven | 1 | 0.23% | 1 | 3.33% |
| Christoph Lameter | 1 | 0.23% | 1 | 3.33% |
| Total | 431 | | 30 | |

```c
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_H
#define _LINUX_IO_URING_H

#include <linux/sched.h>
#include <linux/xarray.h>
#include <uapi/linux/io_uring.h>

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll, it should not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),
};
```
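
The low-valued flags are per-issue flags that io_uring passes to a driver's `->uring_cmd()` file operation as `issue_flags`. A minimal sketch of how a handler might honor `IO_URING_F_NONBLOCK`; the handler and `demo_would_block()` are hypothetical stand-ins, not kernel APIs:

```c
/* Hypothetical ->uring_cmd() handler sketch. demo_would_block() stands in
 * for any driver-specific check that the command cannot finish right away. */
static int demo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	/* In nonblocking issue we must not sleep; -EAGAIN makes io_uring
	 * retry the request from io-wq context instead. */
	if ((issue_flags & IO_URING_F_NONBLOCK) && demo_would_block(ioucmd))
		return -EAGAIN;

	/* ... carry out the command ... */
	return 0;
}
```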
```c
struct io_uring_cmd {
	struct file	*file;
	const void	*cmd;
	union {
		/* callback to defer completions to task context */
		void (*task_work_cb)(struct io_uring_cmd *cmd);
		/* used for polled completion */
		void *cookie;
	};
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};
```
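
The 32-byte `pdu` area lets a driver keep per-command state without a separate allocation. A sketch of the usual accessor pattern, modeled on how the nvme driver uses this field; the `demo_cmd_pdu` type and helper are illustrative:

```c
/* Illustrative per-command state; must fit in the 32-byte pdu area. */
struct demo_cmd_pdu {
	int	status;
	u64	result;
};

static inline struct demo_cmd_pdu *demo_cmd_pdu(struct io_uring_cmd *ioucmd)
{
	/* Compile-time guard that the state actually fits in pdu[]. */
	BUILD_BUG_ON(sizeof(struct demo_cmd_pdu) > sizeof(ioucmd->pdu));
	return (struct demo_cmd_pdu *)&ioucmd->pdu;
}
```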
```c
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *));
struct sock *io_uring_get_socket(struct file *file);
void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);

static inline void io_uring_files_cancel(void)
{
	if (current->io_uring) {
		io_uring_unreg_ringfd();
		__io_uring_cancel(false);
	}
}
static inline void io_uring_task_cancel(void)
{
	if (current->io_uring)
		__io_uring_cancel(true);
}
static inline void io_uring_free(struct task_struct *tsk)
{
	if (tsk->io_uring)
		__io_uring_free(tsk);
}
```
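
A driver that finishes work in interrupt context typically defers the final `io_uring_cmd_done()` to task context via `io_uring_cmd_complete_in_task()`, matching the `task_work_cb` member of the union above. A hedged sketch of that pattern, reusing the hypothetical `demo_cmd_pdu()` helper from the earlier example:

```c
/* Runs in task context via task_work; a safe place to post the completion. */
static void demo_uring_task_cb(struct io_uring_cmd *ioucmd)
{
	struct demo_cmd_pdu *pdu = demo_cmd_pdu(ioucmd);

	io_uring_cmd_done(ioucmd, pdu->status, pdu->result);
}

/* Called from the driver's (possibly IRQ) completion path: stash the
 * results in the pdu area, then bounce completion to task context. */
static void demo_complete(struct io_uring_cmd *ioucmd, int status, u64 result)
{
	struct demo_cmd_pdu *pdu = demo_cmd_pdu(ioucmd);

	pdu->status = status;
	pdu->result = result;
	io_uring_cmd_complete_in_task(ioucmd, demo_uring_task_cb);
}
```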
```c
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		ssize_t ret2)
{
}
static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *))
{
}
static inline struct sock *io_uring_get_socket(struct file *file)
{
	return NULL;
}
static inline void io_uring_task_cancel(void)
{
}
static inline void io_uring_files_cancel(void)
{
}
static inline void io_uring_free(struct task_struct *tsk)
{
}
static inline const char *io_uring_get_opcode(u8 opcode)
{
	return "";
}
#endif

#endif
```