Release 4.7: drivers/base/dma-coherent.c
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void *virt_base;
	dma_addr_t device_base;
	unsigned long pfn_base;
	int size;
	int flags;
	unsigned long *bitmap;
	spinlock_t spinlock;
};
static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dmitry eremin-baryshkov | 165 | 68.18% | 1 | 16.67% |
| marek szyprowski | 38 | 15.70% | 2 | 33.33% |
| brian starkey | 32 | 13.22% | 1 | 16.67% |
| bjorn helgaas | 4 | 1.65% | 1 | 16.67% |
| michal nazarewicz | 3 | 1.24% | 1 | 16.67% |
| Total | 242 | 100.00% | 6 | 100.00% |
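To get a feel for the bookkeeping cost of the bitmap set up above, here is a small stand-alone sketch of the same sizing arithmetic. It is illustrative only (ordinary userspace C, not kernel code) and assumes 4 KiB pages and 64-bit longs, neither of which is guaranteed by this file:

/* Illustration of the pool bookkeeping arithmetic; not kernel code. */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	12	/* assumption: 4 KiB pages */
#define EXAMPLE_BITS_PER_LONG	(8 * sizeof(long))
#define EXAMPLE_BITS_TO_LONGS(n) \
	(((n) + EXAMPLE_BITS_PER_LONG - 1) / EXAMPLE_BITS_PER_LONG)

int main(void)
{
	size_t pool_size = 1 << 20;				/* a 1 MiB coherent pool */
	size_t pages = pool_size >> EXAMPLE_PAGE_SHIFT;		/* 256 page-sized slots */
	size_t bitmap_size = EXAMPLE_BITS_TO_LONGS(pages) * sizeof(long);

	/* On a 64-bit build this prints "pages=256, bitmap=32 bytes":
	 * one bit of overhead per page in the pool. */
	printf("pages=%zu, bitmap=%zu bytes\n", pages, bitmap_size);
	return 0;
}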
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 36 | 69.23% | 1 | 50.00% |
| brian starkey | 16 | 30.77% | 1 | 50.00% |
| Total | 52 | 100.00% | 2 | 100.00% |
static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 36 | 100.00% | 1 | 100.00% |
| Total | 36 | 100.00% | 1 | 100.00% |
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 65 | 86.67% | 1 | 50.00% |
| michal nazarewicz | 10 | 13.33% | 1 | 50.00% |
| Total | 75 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(dma_declare_coherent_memory);
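dma_declare_coherent_memory() is the non-device-tree entry point into this file. As a hedged illustration of how it is typically used, here is a sketch of a made-up platform driver that dedicates an on-chip SRAM bank to its device; the driver, resource name, and the bus == physical address assumption are inventions for this example, not taken from any real driver:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe: back this device's dma_alloc_coherent() with SRAM. */
static int example_sram_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res)
		return -ENODEV;

	/* bus address == physical address is an assumption of this sketch */
	rc = dma_declare_coherent_memory(&pdev->dev, res->start, res->start,
					 resource_size(res),
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
	if (!(rc & DMA_MEMORY_MAP))
		return -ENOMEM;

	/* from here on, dma_alloc_coherent(&pdev->dev, ...) draws from SRAM */
	return 0;
}

static int example_sram_remove(struct platform_device *pdev)
{
	dma_release_declared_memory(&pdev->dev);
	return 0;
}

DMA_MEMORY_EXCLUSIVE makes the pool the only source for this device; without it, allocations that do not fit fall back to the generic allocators, as dma_alloc_from_coherent() below shows.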
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dmitry eremin-baryshkov | 31 | 86.11% | 1 | 50.00% |
| marek szyprowski | 5 | 13.89% | 1 | 50.00% |
| Total | 36 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dmitry eremin-baryshkov | 99 | 79.20% | 1 | 33.33% |
| marek szyprowski | 24 | 19.20% | 1 | 33.33% |
| jan beulich | 2 | 1.60% | 1 | 33.33% |
| Total | 125 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
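dma_mark_declared_memory_occupied() complements the declaration above: it pins a specific range of the pool so the allocator can never hand it out, which is useful for regions a device or its firmware already owns. A minimal sketch, continuing the hypothetical SRAM example above (the mailbox bus address and 4 KiB size are invented):

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-mapping.h>

/* Reserve a 4 KiB firmware mailbox at a fixed bus address inside the pool. */
static void *example_claim_mailbox(struct device *dev, dma_addr_t mbox_bus_addr)
{
	void *vaddr;

	vaddr = dma_mark_declared_memory_occupied(dev, mbox_bus_addr, SZ_4K);
	if (IS_ERR(vaddr))
		return NULL;	/* no pool, or that range is already taken */

	return vaddr;		/* CPU mapping of the now-reserved page */
}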
/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	if (mem->flags & DMA_MEMORY_MAP)
		memset(*ret, 0, size);
	else
		memset_io(*ret, 0, size);
	spin_unlock_irqrestore(&mem->spinlock, flags);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dmitry eremin-baryshkov | 95 | 44.60% | 1 | 14.29% |
| marek szyprowski | 34 | 15.96% | 1 | 14.29% |
| andrew morton | 27 | 12.68% | 1 | 14.29% |
| paul mundt | 23 | 10.80% | 1 | 14.29% |
| brian starkey | 19 | 8.92% | 1 | 14.29% |
| johannes weiner | 11 | 5.16% | 1 | 14.29% |
| adrian mcmenamin | 4 | 1.88% | 1 | 14.29% |
| Total | 213 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(dma_alloc_from_coherent);
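The kerneldoc above says this helper is meant to be called from an architecture's dma_alloc_coherent() path. The call pattern looks roughly like the sketch below; it is a simplified illustration, not any architecture's actual implementation, and the 1:1 virt-to-bus mapping in the fallback is an assumption of the sketch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>

/* Simplified shape of an arch dma_alloc_coherent() built on this helper. */
static void *example_arch_dma_alloc(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	/* A non-zero return means "do not touch the generic allocators":
	 * either the pool satisfied the request, or it is DMA_MEMORY_EXCLUSIVE
	 * and the caller must report failure (vaddr == NULL). */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	/* Fall back to the page allocator; assumes bus address == physical. */
	vaddr = (void *)__get_free_pages(gfp, get_order(size));
	if (vaddr)
		*dma_handle = virt_to_phys(vaddr);
	return vaddr;
}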
/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dmitry eremin-baryshkov | 88 | 78.57% | 1 | 50.00% |
| marek szyprowski | 24 | 21.43% | 1 | 50.00% |
| Total | 112 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(dma_release_from_coherent);
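The free path mirrors the allocation sketch above: offer the buffer back to the per-device pool first, and only fall through to the generic allocator when dma_release_from_coherent() returns 0. Again a simplified illustration rather than real architecture code, and it assumes the fallback came from the page allocator as in the previous sketch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Simplified shape of an arch dma_free_coherent() built on this helper. */
static void example_arch_dma_free(struct device *dev, size_t size, void *vaddr)
{
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;		/* the buffer lived in the device's pool */

	/* otherwise it came from the generic fallback in the alloc sketch */
	free_pages((unsigned long)vaddr, order);
}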
/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 171 | 99.42% | 1 | 50.00% |
| bjorn helgaas | 1 | 0.58% | 1 | 50.00% |
| Total | 172 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(dma_mmap_from_coherent);
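dma_mmap_from_coherent() follows the same try-the-pool-first convention for mmap. Below is a sketch of how an architecture's dma_mmap hook could wire it up; the generic fallback is only hinted at, and mentioning dma_common_mmap() as the usual choice is an assumption rather than a statement about every architecture:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Simplified shape of an arch dma_mmap() built on this helper. */
static int example_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
				 void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	/* Non-zero return: the pool handled it and ret holds the
	 * remap_pfn_range() result (0 on success, -ENXIO if out of range). */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* otherwise map the generic allocation, e.g. via dma_common_mmap() */
	return -ENXIO;
}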
/*
* Support for reserved memory regions defined in device tree
*/
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}

	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 94 | 98.95% | 1 | 50.00% |
| michal nazarewicz | 1 | 1.05% | 1 | 50.00% |
| Total | 95 | 100.00% | 2 | 100.00% |
static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};
static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 93 | 100.00% | 1 | 100.00% |
| Total | 93 | 100.00% | 1 | 100.00% |
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
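With RESERVEDMEM_OF_DECLARE in place, a reserved-memory node whose compatible is "shared-dma-pool" gets rmem_dma_ops attached at boot; a consumer driver then only has to bind its device to the region. The sketch below uses the standard of_reserved_mem helpers, but the driver, node names and addresses are invented for illustration:

#include <linux/platform_device.h>
#include <linux/of_reserved_mem.h>

/*
 * Assumed device tree shape (illustrative only):
 *
 *	reserved-memory {
 *		dma_pool: dma_pool@48000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x48000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	example-dev {
 *		memory-region = <&dma_pool>;
 *	};
 */
static int example_rmem_probe(struct platform_device *pdev)
{
	int rc;

	/* looks up memory-region and ends up calling rmem_dma_device_init() */
	rc = of_reserved_mem_device_init(&pdev->dev);
	if (rc)
		return rc;

	/* dma_alloc_coherent(&pdev->dev, ...) is now served from the pool */
	return 0;
}

static int example_rmem_remove(struct platform_device *pdev)
{
	of_reserved_mem_device_release(&pdev->dev);
	return 0;
}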
Overall Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| marek szyprowski | 694 | 49.78% | 2 | 11.11% |
| dmitry eremin-baryshkov | 523 | 37.52% | 2 | 11.11% |
| brian starkey | 70 | 5.02% | 2 | 11.11% |
| andrew morton | 27 | 1.94% | 1 | 5.56% |
| paul mundt | 24 | 1.72% | 1 | 5.56% |
| michal nazarewicz | 14 | 1.00% | 1 | 5.56% |
| johannes weiner | 11 | 0.79% | 1 | 5.56% |
| ingo molnar | 10 | 0.72% | 1 | 5.56% |
| bjorn helgaas | 7 | 0.50% | 1 | 5.56% |
| adrian mcmenamin | 4 | 0.29% | 1 | 5.56% |
| paul gortmaker | 3 | 0.22% | 1 | 5.56% |
| tejun heo | 3 | 0.22% | 1 | 5.56% |
| jan beulich | 2 | 0.14% | 1 | 5.56% |
| marin mitov | 1 | 0.07% | 1 | 5.56% |
| laurent pinchart | 1 | 0.07% | 1 | 5.56% |
| Total | 1394 | 100.00% | 18 | 100.00% |