Release 4.11: drivers/base/dma-coherent.c
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
};
static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dmitry Baryshkov | 165 | 68.18% | 1 | 16.67% |
Marek Szyprowski | 38 | 15.70% | 2 | 33.33% |
Brian Starkey | 32 | 13.22% | 1 | 16.67% |
Björn Helgaas | 4 | 1.65% | 1 | 16.67% |
Michal Nazarewicz | 3 | 1.24% | 1 | 16.67% |
Total | 242 | 100.00% | 6 | 100.00% |
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 36 | 69.23% | 1 | 50.00% |
Brian Starkey | 16 | 30.77% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 65 | 86.67% | 1 | 50.00% |
Michal Nazarewicz | 10 | 13.33% | 1 | 50.00% |
Total | 75 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(dma_declare_coherent_memory);
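dma_declare_coherent_memory() is the hook a driver uses to hand its private, bus-addressable memory (for example on-chip SRAM) to the DMA layer, and dma_release_declared_memory() below undoes it. A minimal sketch of how a platform driver might use the pair, assuming the region is exposed as MEM resource 0 and that the device sees it at the same address as the CPU; the function names here are placeholders, not code from this file:

```c
/*
 * Hypothetical probe/remove pair (not part of dma-coherent.c).  Assumes the
 * device's local memory is MEM resource 0 and that the device-visible
 * address equals the CPU physical address.
 */
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int my_dev_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/*
	 * Ask for a directly mappable pool and make it the only source for
	 * this device's coherent allocations.  On this kernel the call
	 * returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on failure.
	 */
	if (!dma_declare_coherent_memory(&pdev->dev, res->start, res->start,
					 resource_size(res),
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
		return -ENOMEM;

	return 0;
}

static int my_dev_remove(struct platform_device *pdev)
{
	/* Unmaps the pool and clears dev->dma_mem. */
	dma_release_declared_memory(&pdev->dev);
	return 0;
}
```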
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dmitry Baryshkov | 31 | 86.11% | 1 | 50.00% |
Marek Szyprowski | 5 | 13.89% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dmitry Baryshkov | 99 | 79.20% | 1 | 33.33% |
Marek Szyprowski | 24 | 19.20% | 1 | 33.33% |
Jan Beulich | 2 | 1.60% | 1 | 33.33% |
Total | 125 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
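dma_mark_declared_memory_occupied() lets a driver carve a fixed window out of its declared pool, typically because firmware or hardware already owns that range and ordinary allocations must not land there. A hedged sketch, with a made-up device address and size:

```c
/*
 * Illustrative only: reserve a 64 KiB window at an assumed device address
 * (0x30000000 is a made-up example value) inside a previously declared pool.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/sizes.h>

static void *reserve_fw_window(struct device *dev)
{
	void *fw_window;

	fw_window = dma_mark_declared_memory_occupied(dev, 0x30000000, SZ_64K);
	if (IS_ERR(fw_window)) {
		dev_err(dev, "cannot reserve firmware window: %ld\n",
			PTR_ERR(fw_window));
		return NULL;
	}

	/* Kernel virtual address of the now-occupied region. */
	return fw_window;
}
```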
/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address of the
 *		allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	int dma_memory_map;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	if (dma_memory_map)
		memset(*ret, 0, size);
	else
		memset_io(*ret, 0, size);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dmitry Baryshkov | 95 | 42.79% | 1 | 12.50% |
Andrew Morton | 27 | 12.16% | 1 | 12.50% |
Marek Szyprowski | 24 | 10.81% | 1 | 12.50% |
Paul Mundt | 23 | 10.36% | 1 | 12.50% |
Bastian Hecht | 21 | 9.46% | 1 | 12.50% |
Brian Starkey | 17 | 7.66% | 1 | 12.50% |
Johannes Weiner | 11 | 4.95% | 1 | 12.50% |
Adrian McMenamin | 4 | 1.80% | 1 | 12.50% |
Total | 222 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(dma_alloc_from_coherent);
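dma_alloc_from_coherent() is not called by drivers directly; the generic or per-arch dma_alloc_coherent() path consults it first and only falls back to the normal allocator when it returns 0. A simplified sketch of that calling convention follows; my_arch_alloc_coherent() and my_arch_generic_alloc() are placeholder names, not real kernel symbols:

```c
/* Simplified calling convention, not this file's code; names are placeholders. */
#include <linux/dma-mapping.h>

static void *my_arch_generic_alloc(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t gfp);

static void *my_arch_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	/*
	 * A nonzero return means the request was handled by the pool: either
	 * vaddr points into it, or it is NULL because the pool is
	 * DMA_MEMORY_EXCLUSIVE and could not satisfy the request.
	 */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	/* No per-device pool, or fallback permitted: use the normal allocator. */
	return my_arch_generic_alloc(dev, size, dma_handle, gfp);
}
```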
/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dmitry Baryshkov | 88 | 78.57% | 1 | 50.00% |
Marek Szyprowski | 24 | 21.43% | 1 | 50.00% |
Total | 112 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(dma_release_from_coherent);
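The free side mirrors the allocation path: the arch hook offers the buffer back to the per-device pool before touching the generic allocator. Again a sketch with placeholder names:

```c
/* Companion sketch to the allocation path above; names are placeholders. */
#include <linux/dma-mapping.h>

static void my_arch_generic_free(struct device *dev, size_t size,
				 void *vaddr, dma_addr_t dma_handle);

static void my_arch_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_handle)
{
	/* Returns 1 when vaddr belonged to dev's pool and has been released. */
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	/* Otherwise the buffer came from the generic allocator. */
	my_arch_generic_free(dev, size, vaddr, dma_handle);
}
```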
/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 161 | 95.83% | 1 | 25.00% |
Muhammad Falak R Wani | 3 | 1.79% | 1 | 25.00% |
George G. Davis | 3 | 1.79% | 1 | 25.00% |
Björn Helgaas | 1 | 0.60% | 1 | 25.00% |
Total | 168 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(dma_mmap_from_coherent);
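dma_mmap_from_coherent() plugs into a device's or architecture's mmap hook in the same way: if the CPU address belongs to the pool, the helper performs the remap_pfn_range() itself and reports the result through *ret. A sketch under the same placeholder-name caveat:

```c
/*
 * Sketch of a DMA mmap hook with placeholder names; my_generic_dma_mmap()
 * stands in for whatever fallback mapping path the caller would otherwise use.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int my_generic_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static int my_dev_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	/* If cpu_addr lies in dev's pool, ret carries remap_pfn_range()'s result. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Buffer was not taken from the pool: map it the generic way. */
	return my_generic_dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
```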
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 94 | 98.95% | 1 | 50.00% |
Michal Nazarewicz | 1 | 1.05% | 1 | 50.00% |
Total | 95 | 100.00% | 2 | 100.00% |
static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 93 | 100.00% | 1 | 100.00% |
Total | 93 | 100.00% | 1 | 100.00% |
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
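The reserved-memory path above is driven entirely by the device tree: a region compatible with "shared-dma-pool" gets rmem_dma_ops attached at early boot, and a consumer device binds to it at probe time. A sketch of that consumer side, assuming a platform driver whose DT node carries a memory-region phandle (the driver function names are placeholders):

```c
/*
 * Consumer-side sketch (placeholder driver): binds the device's DT
 * memory-region to it so later dma_alloc_coherent() calls on this device
 * draw from the pool set up by rmem_dma_device_init() above.
 */
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

static int my_dt_dev_probe(struct platform_device *pdev)
{
	int ret;

	/* Looks up the node's memory-region phandle and runs device_init(). */
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret)
		return ret;

	return 0;
}

static int my_dt_dev_remove(struct platform_device *pdev)
{
	/* Detaches the pool; pairs with rmem_dma_device_release() above. */
	of_reserved_mem_device_release(&pdev->dev);
	return 0;
}
```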
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marek Szyprowski | 674 | 48.18% | 2 | 9.52% |
Dmitry Baryshkov | 523 | 37.38% | 2 | 9.52% |
Brian Starkey | 68 | 4.86% | 2 | 9.52% |
Andrew Morton | 27 | 1.93% | 1 | 4.76% |
Paul Mundt | 24 | 1.72% | 1 | 4.76% |
Bastian Hecht | 21 | 1.50% | 1 | 4.76% |
Michal Nazarewicz | 14 | 1.00% | 1 | 4.76% |
Johannes Weiner | 11 | 0.79% | 1 | 4.76% |
Ingo Molnar | 10 | 0.71% | 1 | 4.76% |
Björn Helgaas | 7 | 0.50% | 1 | 4.76% |
Adrian McMenamin | 4 | 0.29% | 1 | 4.76% |
George G. Davis | 3 | 0.21% | 1 | 4.76% |
Tejun Heo | 3 | 0.21% | 1 | 4.76% |
Muhammad Falak R Wani | 3 | 0.21% | 1 | 4.76% |
Paul Gortmaker | 3 | 0.21% | 1 | 4.76% |
Jan Beulich | 2 | 0.14% | 1 | 4.76% |
Laurent Pinchart | 1 | 0.07% | 1 | 4.76% |
Marin Mitov | 1 | 0.07% | 1 | 4.76% |
Total | 1399 | 100.00% | 21 | 100.00% |