Release 4.14 arch/arc/mm/dma.c
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC, so a coherent DMA buffer is
 * implemented by accessing it through a kernel virtual address whose
 * TLB entry has the Cache bit turned off.
 *
 * By default the DMA address equals the physical address, which is
 * 0x8000_0000 based.
 */
#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
static void *arc_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned long order = get_order(size);
struct page *page;
phys_addr_t paddr;
void *kvaddr;
int need_coh = 1, need_kvaddr = 0;
page = alloc_pages(gfp, order);
if (!page)
return NULL;
/*
 * IOC relies on all data (even coherent DMA data) being in cache,
 * so allocate normal cached memory.
 *
 * The gains with IOC are two pronged:
 *   -For streaming data, it elides the need for cache maintenance,
 *    saving cycles in the flush code and bus bandwidth, since the
 *    lines of a buffer no longer have to be flushed out to memory.
 *   -For coherent data, reads/writes to buffers terminate early in
 *    the cache (vs. always going to memory) and are thus faster.
 */
if ((is_isa_arcv2() && ioc_enable) ||
(attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
/*
 * - A coherent buffer needs an MMU mapping to enforce non-cacheability
 * - A highmem page needs a virtual handle (hence an MMU mapping),
 *   independent of cacheability
 */
if (PageHighMem(page) || need_coh)
need_kvaddr = 1;
/* This is linear addr (0x8000_0000 based) */
paddr = page_to_phys(page);
*dma_handle = plat_phys_to_dma(dev, paddr);
/* This is kernel Virtual address (0x7000_0000 based) */
if (need_kvaddr) {
kvaddr = ioremap_nocache(paddr, size);
if (kvaddr == NULL) {
__free_pages(page, order);
return NULL;
}
} else {
kvaddr = (void *)(u32)paddr;
}
/*
 * Evict any existing L1 and/or L2 lines for the backing page
 * in case it was used earlier as a normal "cached" page.
 * Yeah this bit us - STAR 9000898266
 *
 * Although the core does call flush_cache_vmap(), it gets the kvaddr,
 * hence it can't be used to efficiently flush the L1 and/or L2 caches,
 * which need the paddr. Currently flush_cache_vmap() nukes the L1
 * cache completely; that will be optimized in a separate commit.
 */
if (need_coh)
dma_cache_wback_inv(paddr, size);
return kvaddr;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Vineet Gupta | 138 | 73.40% | 6 | 75.00% |
Christoph Hellwig | 44 | 23.40% | 1 | 12.50% |
Krzysztof Kozlowski | 6 | 3.19% | 1 | 12.50% |
Total | 188 | 100.00% | 8 | 100.00% |
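A minimal driver-side sketch of how this allocator is normally reached: a driver calls the generic dma_alloc_coherent()/dma_free_coherent() helpers, which dispatch to arc_dma_ops.alloc and .free on ARC. The device pointer, buffer length, and function name below are illustrative assumptions, not taken from this file.

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate, use and release one coherent buffer */
static int example_coherent_buf(struct device *dev, size_t len)
{
	dma_addr_t bus_addr;
	void *cpu_addr;

	/* Ends up in arc_dma_alloc(): uncached kvaddr unless IOC is enabled */
	cpu_addr = dma_alloc_coherent(dev, len, &bus_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program bus_addr into the device, access cpu_addr from the CPU ... */

	/* Ends up in arc_dma_free() */
	dma_free_coherent(dev, len, cpu_addr, bus_addr);
	return 0;
}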
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
(is_isa_arcv2() && ioc_enable);
if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr);
__free_pages(page, get_order(size));
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Vineet Gupta | 57 | 59.38% | 4 | 57.14% |
Christoph Hellwig | 22 | 22.92% | 1 | 14.29% |
Vladimir Kondratiev | 11 | 11.46% | 1 | 14.29% |
Krzysztof Kozlowski | 6 | 6.25% | 1 | 14.29% |
Total | 96 | 100.00% | 7 | 100.00% |
static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
unsigned long off = vma->vm_pgoff;
int ret = -ENXIO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
ret = remap_pfn_range(vma, vma->vm_start,
pfn + off,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
}
return ret;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Alexey Brodkin | 149 | 99.33% | 1 | 50.00% |
Vladimir Murzin | 1 | 0.67% | 1 | 50.00% |
Total | 150 | 100.00% | 2 | 100.00% |
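A driver that wants to expose such a coherent buffer to user space would typically call dma_mmap_coherent() from its own mmap file operation; that helper dispatches to arc_dma_mmap() above. The example_drv structure and its fields are hypothetical driver state, assumed purely for illustration.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct example_drv {
	struct device *dev;
	void *buf;		/* from dma_alloc_coherent() */
	dma_addr_t buf_dma;
	size_t buf_size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_drv *drv = file->private_data;

	/* Dispatches to arc_dma_ops.mmap, i.e. arc_dma_mmap() */
	return dma_mmap_coherent(drv->dev, vma, drv->buf,
				 drv->buf_dma, drv->buf_size);
}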
/*
 * streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so the buffer needs
 * to be explicitly made consistent before each use.
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_FROM_DEVICE:
dma_cache_inv(paddr, size);
break;
case DMA_TO_DEVICE:
dma_cache_wback(paddr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(paddr, size);
break;
default:
pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
}
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 37 | 56.06% | 1 | 25.00% |
Vineet Gupta | 23 | 34.85% | 2 | 50.00% |
Alexey Brodkin | 6 | 9.09% | 1 | 25.00% |
Total | 66 | 100.00% | 4 | 100.00% |
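The direction-to-operation mapping above follows the usual ownership rules: DMA_TO_DEVICE writes dirty CPU lines back so the device reads current data, DMA_FROM_DEVICE invalidates so the CPU does not read stale lines after the device has written, and DMA_BIDIRECTIONAL does both. Below is a hedged sketch of a receive path exercising the invalidate case; the buffer and function names are assumptions.

#include <linux/dma-mapping.h>

static int example_rx_once(struct device *dev, void *rx_buf, size_t len)
{
	dma_addr_t handle;

	/* map path: _dma_cache_sync(paddr, len, DMA_FROM_DEVICE) -> dma_cache_inv() */
	handle = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device DMAs received data into the buffer ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	/* CPU reads now observe what the device wrote */
	return 0;
}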
/*
* arc_dma_map_page - map a portion of a page for streaming DMA
*
* Ensure that any data held in the cache is appropriately discarded
* or written back.
*
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_page().
*
 * Note: while it takes a struct page as its argument, the caller can
 * "abuse" it to pass a region larger than PAGE_SIZE, provided the region
 * is physically contiguous, and this still works correctly.
*/
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_cache_sync(paddr, size, dir);
return plat_phys_to_dma(dev, paddr);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 42 | 62.69% | 1 | 14.29% |
Vineet Gupta | 14 | 20.90% | 4 | 57.14% |
Alexander Duyck | 9 | 13.43% | 1 | 14.29% |
Krzysztof Kozlowski | 2 | 2.99% | 1 | 14.29% |
Total | 67 | 100.00% | 7 | 100.00% |
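A hedged sketch of the ownership hand-off the comment above describes, using a page-based transmit buffer; the page, offset, and length are assumptions for illustration.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int example_tx_page(struct device *dev, struct page *page,
			   unsigned int offset, size_t len)
{
	dma_addr_t handle;

	/* Writeback via _dma_cache_sync(); the device now owns the buffer */
	handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand "handle" to the device and start the transfer ... */

	/* CPU regains ownership once the transfer has completed */
	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}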
/*
* arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
*
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*
* Note: historically this routine was not implemented for ARC
*/
static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
phys_addr_t paddr = plat_dma_to_phys(dev, handle);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_cache_sync(paddr, size, dir);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Vineet Gupta | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i)
s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
return nents;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 67 | 93.06% | 1 | 33.33% |
Vineet Gupta | 3 | 4.17% | 1 | 33.33% |
Krzysztof Kozlowski | 2 | 2.78% | 1 | 33.33% |
Total | 72 | 100.00% | 3 | 100.00% |
static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i)
arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
attrs);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Vineet Gupta | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
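Both scatter-gather routines simply walk the list and reuse the per-page primitives. A hypothetical caller would build and map a two-entry scatterlist roughly as below; the buffers and their layout are assumptions.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_two_bufs(struct device *dev, void *a, size_t a_len,
				void *b, size_t b_len)
{
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], a, a_len);
	sg_set_buf(&sg[1], b, b_len);

	/* Each entry goes through dma_map_page() and hence _dma_cache_sync() */
	if (!dma_map_sg(dev, sg, 2, DMA_TO_DEVICE))
		return -ENOMEM;

	/* ... program sg_dma_address()/sg_dma_len() of each entry into the device ... */

	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
	return 0;
}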
static void arc_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 20 | 57.14% | 1 | 33.33% |
Vineet Gupta | 15 | 42.86% | 2 | 66.67% |
Total | 35 | 100.00% | 3 | 100.00% |
static void arc_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 18 | 51.43% | 1 | 25.00% |
Vineet Gupta | 12 | 34.29% | 2 | 50.00% |
Alexey Brodkin | 5 | 14.29% | 1 | 25.00% |
Total | 35 | 100.00% | 4 | 100.00% |
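When the CPU needs to look at a buffer that stays mapped for the device (e.g. to inspect a completed descriptor before reusing it), the sync_single pair above passes ownership back and forth without unmapping. A minimal sketch, with the handle, buffer, and length assumed:

#include <linux/dma-mapping.h>

static void example_peek_and_reuse(struct device *dev, const void *buf,
				   dma_addr_t handle, size_t len)
{
	/* Back to the CPU: arc_dma_sync_single_for_cpu() invalidates the lines */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU inspects the received data in buf ... */

	/* Back to the device: arc_dma_sync_single_for_device() writes back */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}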
static void arc_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction dir)
{
int i;
struct scatterlist *sg;
for_each_sg(sglist, sg, nelems, i)
_dma_cache_sync(sg_phys(sg), sg->length, dir);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 47 | 85.45% | 1 | 33.33% |
Vineet Gupta | 8 | 14.55% | 2 | 66.67% |
Total | 55 | 100.00% | 3 | 100.00% |
static void arc_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction dir)
{
int i;
struct scatterlist *sg;
for_each_sg(sglist, sg, nelems, i)
_dma_cache_sync(sg_phys(sg), sg->length, dir);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 38 | 69.09% | 1 | 33.33% |
Vineet Gupta | 17 | 30.91% | 2 | 66.67% |
Total | 55 | 100.00% | 3 | 100.00% |
static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
/* Support 32 bit DMA mask exclusively */
return dma_mask == DMA_BIT_MASK(32);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Christoph Hellwig | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
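Since arc_dma_supported() only accepts a full 32-bit mask, a driver's probe routine would normally just request exactly that; a minimal sketch, with the function name as an assumption.

#include <linux/dma-mapping.h>

static int example_probe_set_mask(struct device *dev)
{
	/* arc_dma_supported() rejects anything other than a 32-bit mask */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}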
const struct dma_map_ops arc_dma_ops = {
.alloc = arc_dma_alloc,
.free = arc_dma_free,
.mmap = arc_dma_mmap,
.map_page = arc_dma_map_page,
.unmap_page = arc_dma_unmap_page,
.map_sg = arc_dma_map_sg,
.unmap_sg = arc_dma_unmap_sg,
.sync_single_for_device = arc_dma_sync_single_for_device,
.sync_single_for_cpu = arc_dma_sync_single_for_cpu,
.sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
.sync_sg_for_device = arc_dma_sync_sg_for_device,
.dma_supported = arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
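This exported table is what the generic DMA API dispatches to on ARC: the architecture's get_arch_dma_ops() hook returns it for every bus. The header sketch below is reconstructed from how such hooks are commonly declared in this kernel era; it is an assumption, not part of this file.

/* arch/arc/include/asm/dma-mapping.h (reconstructed sketch, assumed) */
extern const struct dma_map_ops arc_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &arc_dma_ops;
}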
Overall Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Vineet Gupta | 427 | 40.86% | 9 | 50.00% |
Christoph Hellwig | 411 | 39.33% | 1 | 5.56% |
Alexey Brodkin | 168 | 16.08% | 2 | 11.11% |
Krzysztof Kozlowski | 16 | 1.53% | 1 | 5.56% |
Vladimir Kondratiev | 11 | 1.05% | 1 | 5.56% |
Alexander Duyck | 9 | 0.86% | 1 | 5.56% |
Andrea Gelmini | 1 | 0.10% | 1 | 5.56% |
Vladimir Murzin | 1 | 0.10% | 1 | 5.56% |
Bart Van Assche | 1 | 0.10% | 1 | 5.56% |
Total | 1045 | 100.00% | 18 | 100.00% |