Contributors: 19

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
| --- | --- | --- | --- | --- |
| Jianxin Xiong | 226 | 24.22% | 1 | 2.78% |
| Roland Dreier | 217 | 23.26% | 4 | 11.11% |
| Jason Gunthorpe | 187 | 20.04% | 8 | 22.22% |
| Haggai Eran | 79 | 8.47% | 3 | 8.33% |
| Shiraz Saleem | 71 | 7.61% | 1 | 2.78% |
| Mike Marciniszyn | 46 | 4.93% | 1 | 2.78% |
| Yishai Hadas | 29 | 3.11% | 3 | 8.33% |
| Gal Pressman | 19 | 2.04% | 1 | 2.78% |
| Maor Gottlieb | 18 | 1.93% | 2 | 5.56% |
| Moni Shoua | 16 | 1.71% | 1 | 2.78% |
| Steve Wise | 5 | 0.54% | 1 | 2.78% |
| Eli Cohen | 4 | 0.43% | 1 | 2.78% |
| John Levon | 3 | 0.32% | 2 | 5.56% |
| Bryan O'Sullivan | 3 | 0.32% | 2 | 5.56% |
| Ram Amrani | 3 | 0.32% | 1 | 2.78% |
| Shachar Raindel | 3 | 0.32% | 1 | 2.78% |
| Artemy Kovalyov | 2 | 0.21% | 1 | 2.78% |
| Leon Romanovsky | 1 | 0.11% | 1 | 2.78% |
| Joachim Fenkes | 1 | 0.11% | 1 | 2.78% |
| Total | 933 | | 36 | |

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */
#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
        struct ib_device *ibdev;
        struct mm_struct *owning_mm;
        u64 iova;
        size_t length;
        unsigned long address;
        u32 writable : 1;
        u32 is_odp : 1;
        u32 is_dmabuf : 1;
        struct sg_append_table sgt_append;
};
struct ib_umem_dmabuf {
        struct ib_umem umem;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *first_sg;
        struct scatterlist *last_sg;
        unsigned long first_sg_offset;
        unsigned long last_sg_trim;
        void *private;
        u8 pinned : 1;
        u8 revoked : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
        return container_of(umem, struct ib_umem_dmabuf, umem);
}
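
/*
 * Illustrative sketch, not part of the original header: to_ib_umem_dmabuf()
 * is only valid when the umem is actually embedded in an ib_umem_dmabuf, so
 * callers are expected to check the is_dmabuf flag first. The helper name
 * below is hypothetical.
 */
static inline struct dma_buf_attachment *
example_umem_attachment(struct ib_umem *umem)
{
        if (!umem->is_dmabuf)
                return NULL;
        return to_ib_umem_dmabuf(umem)->attach;
}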
/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
        return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
                unsigned long pgsz)
{
        return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
                (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
                unsigned long pgsz)
{
        return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
                ALIGN_DOWN(umem->iova, pgsz))) / pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
        return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
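
/*
 * Worked example (illustrative, not part of the original header): for a umem
 * with address = iova = 0x40001200, length = 0x3000 and pgsz = 0x1000,
 * ib_umem_offset() is 0x200 and ib_umem_num_dma_blocks() is
 * (ALIGN(0x40004200, 0x1000) - ALIGN_DOWN(0x40001200, 0x1000)) / 0x1000 = 4.
 */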
static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
                struct ib_umem *umem, unsigned long pgsz)
{
        __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
                umem->sgt_append.sgt.nents, pgsz);
        biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
        biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
        return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
        for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
             __rdma_umem_block_iter_next(biter);)
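
/*
 * Usage sketch, not part of the original header: a driver might fill a HW
 * page-address array with one entry per aligned DMA block, assuming pgsz was
 * obtained from ib_umem_find_best_pgsz() and pas[] has room for
 * ib_umem_num_dma_blocks() entries. The function name is hypothetical;
 * rdma_block_iter_dma_address() comes from <rdma/ib_verbs.h>.
 */
static inline void example_fill_page_list(struct ib_umem *umem,
                unsigned long pgsz, u64 *pas)
{
        struct ib_block_iter biter;
        size_t i = 0;

        rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                pas[i++] = rdma_block_iter_dma_address(&biter);
}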
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                unsigned long pgsz_bitmap, unsigned long virt);
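
/*
 * Usage sketch, not part of the original header: a typical MR registration
 * path pins the user range, then derives the best HW page size from a bitmap
 * of the sizes the device supports (4K and 2M here, chosen arbitrarily). The
 * function name is hypothetical; IS_ERR() is assumed to come from
 * <linux/err.h>. Returns the chosen page size, or 0 on failure.
 */
static inline unsigned long example_pin_and_pick_pgsz(struct ib_device *device,
                u64 start, u64 length, u64 virt_addr, int access,
                struct ib_umem **out)
{
        struct ib_umem *umem;
        unsigned long pgsz;

        umem = ib_umem_get(device, start, length, access);
        if (IS_ERR(umem))
                return 0;

        pgsz = ib_umem_find_best_pgsz(umem, (1UL << 12) | (1UL << 21),
                virt_addr);
        if (!pgsz) {
                ib_umem_release(umem);
                return 0;
        }

        *out = umem;
        return pgsz;
}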
/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                unsigned long pgsz_bitmap, u64 pgoff_bitmask)
{
        struct scatterlist *sg = umem->sgt_append.sgt.sgl;
        dma_addr_t dma_addr;

        dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
        return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
                dma_addr & pgoff_bitmask);
}
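
/*
 * Example (illustrative, not part of the original header): for the case
 * described above, HW that supports 4K and 2M pages and can represent the
 * start of the region as a 64-byte-aligned offset of up to 4032 bytes
 * could use:
 *
 *      pgsz = ib_umem_find_best_pgoff(umem, (1UL << 12) | (1UL << 21), 0xfc0);
 *
 * where 0xfc0 is the "111111000000" bitmask from the comment above and
 * (1UL << 12) | (1UL << 21) selects the 4K and 2M page sizes.
 */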
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                unsigned long offset, size_t size, int fd, int access,
                const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
                unsigned long offset, size_t size, int fd, int access);
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
                struct device *dma_device, unsigned long offset, size_t size,
                int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
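
/*
 * Usage sketch, not part of the original header: a driver without dynamic
 * invalidation support would normally use the pinned variant, which pins and
 * maps the dma-buf at registration time; the mapping is torn down again by
 * ib_umem_dmabuf_release() at deregistration. Explicit calls to
 * ib_umem_dmabuf_map_pages()/ib_umem_dmabuf_unmap_pages() are only needed in
 * the unpinned ib_umem_dmabuf_get() flow, under the dma-buf reservation lock.
 * The function name below is hypothetical, error handling is abbreviated and
 * IS_ERR()/PTR_ERR() are assumed to come from <linux/err.h>.
 */
static inline int example_dmabuf_mr(struct ib_device *device,
                unsigned long offset, size_t size, int fd, int access)
{
        struct ib_umem_dmabuf *umem_dmabuf;

        umem_dmabuf = ib_umem_dmabuf_get_pinned(device, offset, size, fd,
                access);
        if (IS_ERR(umem_dmabuf))
                return PTR_ERR(umem_dmabuf);

        /* ... program HW from &umem_dmabuf->umem, e.g. with
         * rdma_umem_for_each_dma_block() ...
         */

        ib_umem_dmabuf_release(umem_dmabuf);
        return 0;
}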
#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
                unsigned long addr, size_t size, int access)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void ib_umem_release(struct ib_umem *umem) { }

static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                size_t length)
{
        return -EOPNOTSUPP;
}

static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                unsigned long pgsz_bitmap, unsigned long virt)
{
        return 0;
}

static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                unsigned long pgsz_bitmap, u64 pgoff_bitmask)
{
        return 0;
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get(struct ib_device *device, unsigned long offset,
                size_t size, int fd, int access,
                const struct dma_buf_attach_ops *ops)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
                size_t size, int fd, int access)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
                struct device *dma_device, unsigned long offset, size_t size,
                int fd, int access)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        return -EOPNOTSUPP;
}

static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */

#endif /* IB_UMEM_H */