Contributors: 3
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jens Axboe | 157 | 84.41% | 14 | 77.78% |
| Roman Penyaev | 18 | 9.68% | 1 | 5.56% |
| Pavel Begunkov | 11 | 5.91% | 3 | 16.67% |
| Total | 186 | | 18 | |
```c
#ifndef IO_URING_MEMMAP_H
#define IO_URING_MEMMAP_H

/* Pin the user pages backing [ubuf, ubuf + len) and return the page array. */
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
/* Unpin and free a page array previously returned by io_pin_pages(). */
void io_pages_free(struct page ***pages, int npages);
/* Insert an array of pages into a userspace VMA. */
int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma,
			struct page **pages, int npages);

/* Allocate pages and return a contiguous kernel mapping of them. */
void *io_pages_map(struct page ***out_pages, unsigned short *npages,
		   size_t size);
/* Undo io_pages_map(): unmap and, if put_pages, release the pages as well. */
void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
		    bool put_pages);

/* Pin a user-provided region at uaddr and map it into the kernel. */
void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
		     unsigned long uaddr, size_t size);

#ifndef CONFIG_MMU
unsigned int io_uring_nommu_mmap_capabilities(struct file *file);
#endif
/* ->get_unmapped_area() and ->mmap() hooks for the io_uring file. */
unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					  unsigned long len, unsigned long pgoff,
					  unsigned long flags);
int io_uring_mmap(struct file *file, struct vm_area_struct *vma);

#endif
```
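
These helpers fall into two groups: pinning memory that userspace already owns (io_pin_pages()/io_pages_free(), __io_uaddr_map()) and allocating kernel pages that are later mapped into a process via io_uring_mmap()/io_uring_mmap_pages() (io_pages_map()/io_pages_unmap()). Below is a minimal sketch of how the allocation pair is typically used, assuming io_pages_map() follows the usual ERR_PTR() convention on failure; the example_region struct and example_* functions are hypothetical wrappers for illustration, not part of io_uring.

```c
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm_types.h>

#include "memmap.h"

/* Hypothetical wrapper tracking one mapped region, for illustration only. */
struct example_region {
	void		*ptr;		/* kernel mapping of the pages */
	struct page	**pages;	/* backing pages */
	unsigned short	npages;		/* number of backing pages */
};

static int example_alloc_region(struct example_region *r, size_t size)
{
	/* Allocate pages and obtain a contiguous kernel mapping of them. */
	r->ptr = io_pages_map(&r->pages, &r->npages, size);
	if (IS_ERR(r->ptr))
		return PTR_ERR(r->ptr);
	return 0;
}

static void example_free_region(struct example_region *r)
{
	/* Unmap and, with put_pages == true, release the backing pages too. */
	io_pages_unmap(r->ptr, &r->pages, &r->npages, true);
	r->ptr = NULL;
}
```

Userspace then reaches such a region through the io_uring file's ->mmap() hook (io_uring_mmap()), which hands the page array to io_uring_mmap_pages() to populate the VMA.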