Release 4.18: arch/microblaze/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	return consistent_alloc(flag, size, dma_handle);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Michal Simek | 31 | 86.11% | 2 | 40.00% |
Andrzej Pietrasiewicz | 2 | 5.56% | 1 | 20.00% |
Krzysztof Kozlowski | 2 | 5.56% | 1 | 20.00% |
Christoph Hellwig | 1 | 2.78% | 1 | 20.00% |
Total | 36 | 100.00% | 5 | 100.00% |
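For orientation: drivers do not call this callback directly. dma_alloc_coherent() dispatches to it through the dma_nommu_ops table at the bottom of this file. A minimal, hypothetical caller sketch (the driver and buffer names are illustrative, not from this tree):

#include <linux/dma-mapping.h>

/* Hypothetical driver code; the allocation lands in dma_nommu_alloc_coherent(). */
static int mydrv_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* One page of device-visible, uncached memory */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the hardware, use ring from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}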
static void dma_nommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	consistent_free(size, vaddr);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Michal Simek | 27 | 84.38% | 3 | 50.00% |
Andrzej Pietrasiewicz | 2 | 6.25% | 1 | 16.67% |
Krzysztof Kozlowski | 2 | 6.25% | 1 | 16.67% |
Christoph Hellwig | 1 | 3.12% | 1 | 16.67% |
Total | 32 | 100.00% | 6 | 100.00% |
static inline void __dma_sync(unsigned long paddr,
			      size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
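The direction argument encodes who wrote the buffer last: for DMA_TO_DEVICE the CPU wrote it, so dirty lines are written back before the device reads; for DMA_FROM_DEVICE the device will write it, so stale lines are invalidated before the CPU reads. A hypothetical in-file illustration (the buffer names are made up; __dma_sync() is the helper above):

static void example_sync_directions(void *tx_buf, void *rx_buf, size_t len)
{
	/* CPU filled tx_buf: write back so the device sees the data */
	__dma_sync(virt_to_phys(tx_buf), len, DMA_TO_DEVICE);

	/* ... the device performs its transfers ... */

	/* Device filled rx_buf: invalidate so the CPU re-reads from RAM */
	__dma_sync(virt_to_phys(rx_buf), len, DMA_FROM_DEVICE);
}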
static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Michal Simek | 69 | 85.19% | 2 | 28.57% |
Alexander Duyck | 7 | 8.64% | 1 | 14.29% |
Krzysztof Kozlowski | 2 | 2.47% | 1 | 14.29% |
Christoph Hellwig | 1 | 1.23% | 1 | 14.29% |
Eli Billauer | 1 | 1.23% | 1 | 14.29% |
Geliang Tang | 1 | 1.23% | 1 | 14.29% |
Total | 81 | 100.00% | 7 | 100.00% |
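On the caller's side, a driver maps a scatterlist with dma_map_sg(), which reaches this callback. A hedged sketch, with everything except the API calls hypothetical:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int mydrv_map_buffers(struct device *dev, struct scatterlist *sgl,
			     int nents)
{
	int mapped;

	/* CPU-filled buffers headed to the device: flushed during mapping */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -EIO;

	/* ... start the DMA; once it completes ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}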
static inline dma_addr_t dma_nommu_map_page(struct device *dev,
					    struct page *page,
					    unsigned long offset,
					    size_t size,
					    enum dma_data_direction direction,
					    unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Michal Simek | 49 | 77.78% | 3 | 42.86% |
Alexander Duyck | 9 | 14.29% | 1 | 14.29% |
Eli Billauer | 2 | 3.17% | 1 | 14.29% |
Krzysztof Kozlowski | 2 | 3.17% | 1 | 14.29% |
Christoph Hellwig | 1 | 1.59% | 1 | 14.29% |
Total | 63 | 100.00% | 7 | 100.00% |
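Both dma_map_page() and dma_map_single() end up here; for the latter, the generic layer splits the kernel pointer into a page and offset before calling this op. A hypothetical single-buffer transmit sketch:

static int mydrv_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* Streaming mapping: flushes the buffer and returns its bus address */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -EIO;

	/* ... program dma into the device and start the transfer ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}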
static inline void dma_nommu_unmap_page(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					unsigned long attrs)
{
	/*
	 * No extra cache cleanup is necessary beyond the sync below:
	 * dma_address is already a physical address, so it can be passed
	 * straight to __dma_sync() without a phys_to_virt() round trip.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Michal Simek | 32 | 71.11% | 3 | 42.86% |
Alexander Duyck | 9 | 20.00% | 1 | 14.29% |
Krzysztof Kozlowski | 2 | 4.44% | 1 | 14.29% |
Christoph Hellwig | 1 | 2.22% | 1 | 14.29% |
Eli Billauer | 1 | 2.22% | 1 | 14.29% |
Total | 45 | 100.00% | 7 | 100.00% |
static inline void
dma_nommu_sync_single_for_cpu(struct device *dev,
			      dma_addr_t dma_handle, size_t size,
			      enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Eli Billauer | 37 | 97.37% | 1 | 50.00% |
Christoph Hellwig | 1 | 2.63% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
static inline void
dma_nommu_sync_single_for_device(struct device *dev,
				 dma_addr_t dma_handle, size_t size,
				 enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Eli Billauer | 37 | 97.37% | 1 | 50.00% |
Christoph Hellwig | 1 | 2.63% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
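Together, the two sync_single callbacks implement the ownership handoff on a long-lived streaming mapping: sync_for_cpu before the CPU reads a device-written buffer, sync_for_device when handing it back. A hypothetical receive loop body:

static void mydrv_rx_poll(struct device *dev, dma_addr_t buf_dma,
			  void *buf, size_t len)
{
	/* Device has written buf: drop stale cache lines before reading */
	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);

	/* ... CPU inspects or copies the received data in buf ... */

	/* Return ownership to the device for the next transfer */
	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
}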
static inline void
dma_nommu_sync_sg_for_cpu(struct device *dev,
			  struct scatterlist *sgl, int nents,
			  enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Eli Billauer | 61 | 98.39% | 1 | 50.00% |
Christoph Hellwig | 1 | 1.61% | 1 | 50.00% |
Total | 62 | 100.00% | 2 | 100.00% |
static inline void
dma_nommu_sync_sg_for_device(struct device *dev,
			     struct scatterlist *sgl, int nents,
			     enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Eli Billauer | 61 | 98.39% | 1 | 50.00% |
Christoph Hellwig | 1 | 1.61% | 1 | 50.00% |
Total | 62 | 100.00% | 2 | 100.00% |
static
int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t handle, size_t size,
			    unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Lars-Peter Clausen | 128 | 94.81% | 1 | 20.00% |
Thomas Meyer | 3 | 2.22% | 1 | 20.00% |
Krzysztof Kozlowski | 2 | 1.48% | 1 | 20.00% |
Michal Simek | 1 | 0.74% | 1 | 20.00% |
Christoph Hellwig | 1 | 0.74% | 1 | 20.00% |
Total | 135 | 100.00% | 5 | 100.00% |
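A driver exposes such a coherent buffer to user space from its own mmap handler; dma_mmap_coherent() forwards to this callback. A hypothetical handler (the struct and file_operations wiring are illustrative):

struct mydrv {			/* hypothetical driver state */
	struct device *dev;
	void *ring;		/* from dma_alloc_coherent() */
	dma_addr_t ring_dma;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *priv = file->private_data;

	/* Remaps the coherent buffer into the calling process */
	return dma_mmap_coherent(priv->dev, vma, priv->ring,
				 priv->ring_dma, PAGE_SIZE);
}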
const struct dma_map_ops dma_nommu_ops = {
	.alloc			= dma_nommu_alloc_coherent,
	.free			= dma_nommu_free_coherent,
	.mmap			= dma_nommu_mmap_coherent,
	.map_sg			= dma_nommu_map_sg,
	.map_page		= dma_nommu_map_page,
	.unmap_page		= dma_nommu_unmap_page,
	.sync_single_for_cpu	= dma_nommu_sync_single_for_cpu,
	.sync_single_for_device	= dma_nommu_sync_single_for_device,
	.sync_sg_for_cpu	= dma_nommu_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_nommu_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_nommu_ops);
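These ops are the architecture-wide default: in this release, microblaze returns &dma_nommu_ops from get_arch_dma_ops() for every bus, so the generic dma_* helpers used in the sketches above all dispatch through this table. Paraphrased from memory (verify against arch/microblaze/include/asm/dma-mapping.h):

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &dma_nommu_ops;
}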
Overall Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Michal Simek | 249 | 33.92% | 7 | 33.33% |
Eli Billauer | 216 | 29.43% | 2 | 9.52% |
Lars-Peter Clausen | 132 | 17.98% | 1 | 4.76% |
Christoph Hellwig | 82 | 11.17% | 2 | 9.52% |
Alexander Duyck | 25 | 3.41% | 1 | 4.76% |
Krzysztof Kozlowski | 12 | 1.63% | 1 | 4.76% |
Andrzej Pietrasiewicz | 6 | 0.82% | 1 | 4.76% |
Tejun Heo | 3 | 0.41% | 1 | 4.76% |
Thomas Meyer | 3 | 0.41% | 1 | 4.76% |
Paul Gortmaker | 3 | 0.41% | 1 | 4.76% |
Bart Van Assche | 1 | 0.14% | 1 | 4.76% |
Greg Kroah-Hartman | 1 | 0.14% | 1 | 4.76% |
Geliang Tang | 1 | 0.14% | 1 | 4.76% |
Total | 734 | 100.00% | 21 | 100.00% |