Contributors: 10

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Robin Murphy | 58 | 26.85% | 2 | 14.29% |
| Catalin Marinas | 48 | 22.22% | 2 | 14.29% |
| Leon Romanovsky | 41 | 18.98% | 2 | 14.29% |
| Suravee Suthikulpanit | 23 | 10.65% | 1 | 7.14% |
| Laura Abbott | 14 | 6.48% | 1 | 7.14% |
| David Brownell | 12 | 5.56% | 1 | 7.14% |
| Krzysztof Kozlowski | 10 | 4.63% | 1 | 7.14% |
| James Bottomley | 6 | 2.78% | 1 | 7.14% |
| Martin Oliveira | 2 | 0.93% | 1 | 7.14% |
| Christoph Hellwig | 2 | 0.93% | 2 | 14.29% |
| Total | 216 | | 14 | |
// SPDX-License-Identifier: GPL-2.0
/*
* Dummy DMA ops that always fail.
*/
#include <linux/dma-map-ops.h>
/*
 * Refuse to map the coherent buffer into userspace: there is no DMA
 * backing on a device using the dummy ops, so nothing can be mmap'd.
 */
static int dma_dummy_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	return -ENXIO;
}
/*
 * Mapping a physical range always fails; callers must check with
 * dma_mapping_error() and will see DMA_MAPPING_ERROR.
 */
static dma_addr_t dma_dummy_map_phys(struct device *dev, phys_addr_t phys,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
/*
 * Tear-down counterpart of dma_dummy_map_phys(). Since map_phys can
 * never succeed (it always returns DMA_MAPPING_ERROR), there is never a
 * valid mapping to release, so reaching this function indicates a caller
 * bug — warn loudly once.
 */
static void dma_dummy_unmap_phys(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	/*
	 * Dummy ops doesn't support map_phys, so unmap_phys should never
	 * be called.  (The old comment referred to "unmap_page", a stale
	 * name from before the phys-based API.)
	 */
	WARN_ON_ONCE(true);
}
/*
 * Scatter-gather mapping is not supported either; report failure with
 * a negative errno as the map_sg contract requires.
 */
static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir,
			    unsigned long attrs)
{
	return -EINVAL;
}
/*
 * map_sg always fails on the dummy ops, so no scatterlist mapping can
 * exist to be unmapped — getting here is a caller bug; warn once.
 */
static void dma_dummy_unmap_sg(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir,
			       unsigned long attrs)
{
	WARN_ON_ONCE(true);
}
/*
 * No DMA mask is ever supported: dma_supported() returning 0 tells the
 * core that this device cannot do DMA at all.
 */
static int dma_dummy_supported(struct device *hwdev, u64 mask)
{
	return 0;
}
/*
 * The always-failing operation table: every callback either returns an
 * error or warns, so any DMA attempt through these ops fails cleanly.
 */
const struct dma_map_ops dma_dummy_ops = {
	.mmap		= dma_dummy_mmap,
	.map_phys	= dma_dummy_map_phys,
	.unmap_phys	= dma_dummy_unmap_phys,
	.map_sg		= dma_dummy_map_sg,
	.unmap_sg	= dma_dummy_unmap_sg,
	.dma_supported	= dma_dummy_supported,
};