Release 4.18 arch/sh/kernel/dma-nommu.c
/*
* DMA mapping support for platforms lacking IOMMUs.
*
* Copyright (C) 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
/*
 * Map a single page region for DMA on a platform with no IOMMU.
 *
 * The bus address is just the CPU physical address of page + offset,
 * shifted down by the device's fixed PFN offset.  Unless the caller
 * asked to skip it (DMA_ATTR_SKIP_CPU_SYNC), the CPU cache for the
 * region is synchronized for the device before the address is handed out.
 */
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 unsigned long attrs)
{
	dma_addr_t dma_addr;

	/* A zero-length mapping is a caller bug worth flagging. */
	WARN_ON(size == 0);

	dma_addr = page_to_phys(page) + offset;
	dma_addr -= PFN_PHYS(dev->dma_pfn_offset);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		sh_sync_dma_for_device(page_address(page) + offset, size, dir);

	return dma_addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 62 | 76.54% | 1 | 20.00% |
Alexander Duyck | 9 | 11.11% | 1 | 20.00% |
Thomas Petazzoni | 7 | 8.64% | 1 | 20.00% |
Krzysztof Kozlowski | 2 | 2.47% | 1 | 20.00% |
Christoph Hellwig | 1 | 1.23% | 1 | 20.00% |
Total | 81 | 100.00% | 5 | 100.00% |
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
WARN_ON(nents == 0 || sg[0].length == 0);
for_each_sg(sg, s, nents, i) {
dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
BUG_ON(!sg_page(s));
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
sh_sync_dma_for_device(sg_virt(s), s->length, dir);
s->dma_address = sg_phys(s) - offset;
s->dma_length = s->length;
}
return nents;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 103 | 81.10% | 1 | 20.00% |
Thomas Petazzoni | 12 | 9.45% | 1 | 20.00% |
Alexander Duyck | 9 | 7.09% | 1 | 20.00% |
Krzysztof Kozlowski | 2 | 1.57% | 1 | 20.00% |
Christoph Hellwig | 1 | 0.79% | 1 | 20.00% |
Total | 127 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_DMA_NONCOHERENT
/*
 * Synchronize a single mapped region for device access.  With no IOMMU
 * the bus address maps straight back to a kernel virtual address via
 * phys_to_virt().
 */
static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
					 size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(addr);

	sh_sync_dma_for_device(vaddr, size, dir);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 31 | 93.94% | 1 | 50.00% |
Christoph Hellwig | 2 | 6.06% | 1 | 50.00% |
Total | 33 | 100.00% | 2 | 100.00% |
/*
 * Synchronize every entry of a mapped scatter/gather list for device
 * access, one cache-maintenance call per entry.
 */
static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				     int nelems, enum dma_data_direction dir)
{
	struct scatterlist *cur;
	int idx;

	for_each_sg(sg, cur, nelems, idx)
		sh_sync_dma_for_device(sg_virt(cur), cur->length, dir);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 53 | 96.36% | 1 | 50.00% |
Christoph Hellwig | 2 | 3.64% | 1 | 50.00% |
Total | 55 | 100.00% | 2 | 100.00% |
#endif
/*
 * DMA operations for platforms without an IOMMU: mapping is a direct
 * physical-address translation (see nommu_map_page/nommu_map_sg above),
 * with coherent allocations delegated to the generic helpers.
 */
const struct dma_map_ops nommu_dma_ops = {
	.alloc = dma_generic_alloc_coherent,
	.free = dma_generic_free_coherent,
	.map_page = nommu_map_page,
	.map_sg = nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
	/* Explicit cache sync hooks are only needed on non-coherent DMA. */
	.sync_single_for_device = nommu_sync_single_for_device,
	.sync_sg_for_device = nommu_sync_sg_for_device,
#endif
};
/*
 * Install the nommu DMA operations as the system default, unless an
 * earlier initializer has already registered a set of dma_ops.
 */
void __init no_iommu_init(void)
{
	if (!dma_ops)
		dma_ops = &nommu_dma_ops;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 316 | 84.95% | 3 | 30.00% |
Thomas Petazzoni | 19 | 5.11% | 1 | 10.00% |
Alexander Duyck | 18 | 4.84% | 1 | 10.00% |
Christoph Hellwig | 12 | 3.23% | 2 | 20.00% |
Krzysztof Kozlowski | 4 | 1.08% | 1 | 10.00% |
Andrzej Pietrasiewicz | 2 | 0.54% | 1 | 10.00% |
Bart Van Assche | 1 | 0.27% | 1 | 10.00% |
Total | 372 | 100.00% | 10 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.