Contributors: 24
Author Tokens Token Proportion Commits Commit Proportion
Christoph Hellwig 102 20.16% 11 26.19%
FUJITA Tomonori 89 17.59% 2 4.76%
Leon Romanovsky 72 14.23% 2 4.76%
David Brownell 60 11.86% 1 2.38%
Robin Murphy 38 7.51% 3 7.14%
Alex Williamson 18 3.56% 1 2.38%
Marek Szyprowski 16 3.16% 4 9.52%
Andi Kleen 14 2.77% 1 2.38%
James Bottomley 14 2.77% 2 4.76%
Krzysztof Kozlowski 12 2.37% 1 2.38%
Russell King 12 2.37% 1 2.38%
Saravana Kannan 10 1.98% 1 2.38%
Yoshihiro Shimoda 9 1.78% 1 2.38%
Nicolin Chen 8 1.58% 1 2.38%
Jia He 7 1.38% 1 2.38%
Linus Torvalds 6 1.19% 1 2.38%
John Garry 5 0.99% 1 2.38%
Mark Nelson 3 0.59% 1 2.38%
Bart Van Assche 3 0.59% 1 2.38%
Paul Mackerras 2 0.40% 1 2.38%
Deepak Saxena 2 0.40% 1 2.38%
Herve Codina 2 0.40% 1 2.38%
Tejun Heo 1 0.20% 1 2.38%
Greg Kroah-Hartman 1 0.20% 1 2.38%
Total 506 100.00% 42 100.00%


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 *
 * DMA operations that map physical memory through IOMMU.
 */
#ifndef _LINUX_IOMMU_DMA_H
#define _LINUX_IOMMU_DMA_H

#include <linux/dma-direction.h>

#ifdef CONFIG_IOMMU_DMA
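/**
 * use_dma_iommu - check whether a device maps DMA through the IOMMU DMA layer
 * @dev: device to query
 *
 * Returns true when @dev has been set up so that its DMA API operations are
 * handled by the dma-iommu implementation declared below.
 */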
static inline bool use_dma_iommu(struct device *dev)
{
	return dev->dma_iommu;
}
#else
static inline bool use_dma_iommu(struct device *dev)
{
	return false;
}
#endif /* CONFIG_IOMMU_DMA */
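
/*
 * Usage sketch (illustrative only, not part of the API below): callers in
 * the dma-mapping core are expected to test use_dma_iommu() before
 * dispatching into the iommu_dma_* helpers, roughly along these lines:
 *
 *	if (use_dma_iommu(dev))
 *		addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
 *	else
 *		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 *
 * With CONFIG_IOMMU_DMA=n the stub above is constant false, so the IOMMU
 * branch compiles away.  The "ops" fallback shown here is illustrative only.
 */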

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp, unsigned long attrs);
int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
unsigned long iommu_dma_get_merge_boundary(struct device *dev);
size_t iommu_dma_opt_mapping_size(void);
size_t iommu_dma_max_mapping_size(struct device *dev);
void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs);
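
/*
 * Usage sketch (illustrative only): the coherent helpers above pair up as
 * alloc/free, with iommu_dma_mmap() and iommu_dma_get_sgtable() exporting an
 * existing allocation to user space or as a scatterlist:
 *
 *	cpu_addr = iommu_dma_alloc(dev, size, &dma_handle, GFP_KERNEL, 0);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...use cpu_addr / dma_handle...
 *	iommu_dma_free(dev, size, cpu_addr, dma_handle, 0);
 */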
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
	vunmap(vaddr)
int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
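
/*
 * Usage sketch (illustrative only): lifecycle of a non-contiguous
 * allocation, which returns an sg_table that may be mapped into the kernel
 * with the vmap helper and must be unmapped again before it is freed:
 *
 *	sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, GFP_KERNEL, 0);
 *	vaddr = iommu_dma_vmap_noncontiguous(dev, size, sgt);
 *	...fill or consume the buffer through vaddr...
 *	iommu_dma_vunmap_noncontiguous(dev, vaddr);
 *	iommu_dma_free_noncontiguous(dev, size, sgt, dir);
 */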
void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir);
void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir);
void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir);
void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir);
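
/*
 * Usage sketch (illustrative only): a streaming mapping of a single page for
 * a device-to-memory transfer, with an explicit ownership transfer back to
 * the CPU before the data is read:
 *
 *	dma = iommu_dma_map_page(dev, page, 0, size, DMA_FROM_DEVICE, 0);
 *	...device DMAs into the buffer...
 *	iommu_dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
 *	...CPU reads the received data...
 *	iommu_dma_unmap_page(dev, dma, size, DMA_FROM_DEVICE, 0);
 */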

#endif /* _LINUX_IOMMU_DMA_H */