cregit-Linux: how code gets into the kernel

Release 4.14: drivers/base/dma-coherent.c
Directory: drivers/base
// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>


struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};


static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;


static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        30      100.00%  2        100.00%
Total                  30      100.00%  2        100.00%


static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        42      100.00%  1        100.00%
Total                  42      100.00%  1        100.00%


static int dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Dmitry Baryshkov       135     59.73%   1        16.67%
Marek Szyprowski       38      16.81%   2        33.33%
Arnd Bergmann          34      15.04%   1        16.67%
Brian Starkey          16      7.08%    1        16.67%
Björn Helgaas          3       1.33%    1        16.67%
Total                  226     100.00%  6        100.00%


static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       29      80.56%   1        50.00%
Brian Starkey          7       19.44%   1        50.00%
Total                  36      100.00%  2        100.00%


static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       35      79.55%   1        50.00%
Vladimir Murzin        9       20.45%   1        50.00%
Total                  44      100.00%  2        100.00%


int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				       &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       57      77.03%   1        50.00%
Christoph Hellwig      17      22.97%   1        50.00%
Total                  74      100.00%  2        100.00%

EXPORT_SYMBOL(dma_declare_coherent_memory);
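
As a usage illustration (not part of this file): a minimal sketch of a platform driver declaring a device-local pool at probe time. The driver name, bus address, and size are all hypothetical.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int foo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	int ret;

	/* Carve a 1 MiB window of device-local memory at bus address
	 * 0x30000000 (made up) into a private coherent pool. With
	 * DMA_MEMORY_EXCLUSIVE, allocations never fall back to generic
	 * memory when the pool is exhausted. */
	ret = dma_declare_coherent_memory(&pdev->dev, 0x30000000, 0x30000000,
					  SZ_1M, DMA_MEMORY_EXCLUSIVE);
	if (ret)
		return ret;

	/* Subsequent dma_alloc_coherent(&pdev->dev, ...) calls are now
	 * served from this pool via dma_alloc_from_dev_coherent(). */
	return 0;
}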
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Dmitry Baryshkov       31      86.11%   1        50.00%
Marek Szyprowski       5       13.89%   1        50.00%
Total                  36      100.00%  2        100.00%

EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Dmitry Baryshkov       93      73.23%   1        25.00%
Marek Szyprowski       24      18.90%   1        25.00%
Vladimir Murzin        8       6.30%    1        25.00%
Jan Beulich            2       1.57%    1        25.00%
Total                  127     100.00%  4        100.00%

EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
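
A hedged follow-on sketch: with a pool like the one above in place, a driver can pin a fixed sub-range (for example, a descriptor ring the hardware expects at a known bus address) so the bitmap allocator never hands it out. The helper name and layout are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/sizes.h>

static void *foo_reserve_ring(struct device *dev)	/* hypothetical helper */
{
	void *ring;

	/* Pin the first 4 KiB of the pool (bus address 0x30000000,
	 * matching the made-up pool base above). */
	ring = dma_mark_declared_memory_occupied(dev, 0x30000000, SZ_4K);
	if (IS_ERR(ring))
		return NULL;	/* no pool, or the region is already taken */

	return ring;		/* kernel virtual address of the pinned range */
}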
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Dmitry Baryshkov       66      42.31%   1        12.50%
Marek Szyprowski       24      15.38%   1        12.50%
Vladimir Murzin        17      10.90%   1        12.50%
Paul Mundt             15      9.62%    1        12.50%
Johannes Weiner        11      7.05%    1        12.50%
Bastian Hecht          10      6.41%    1        12.50%
Andrew Morton          9       5.77%    1        12.50%
Adrian McMenamin       4       2.56%    1        12.50%
Total                  156     100.00%  8        100.00%

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        59      86.76%   1        33.33%
Dmitry Baryshkov       7       10.29%   1        33.33%
Paul Mundt             2       2.94%    1        33.33%
Total                  68      100.00%  3        100.00%

EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
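
To make the contract in the comment above concrete, here is a sketch of the kind of per-arch call site it describes. The wrapper and the generic_dma_alloc() fallback are hypothetical names, not real kernel symbols.

#include <linux/dma-mapping.h>

/* Hypothetical fallback allocator standing in for an arch's generic path. */
extern void *generic_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp);

static void *example_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* A non-zero return means the pool answered: either *ret holds a
	 * buffer, or an exclusive pool is full and *ret is NULL, in which
	 * case no fallback is permitted. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* No per-device pool (or fallback allowed): use generic memory. */
	return generic_dma_alloc(dev, size, dma_handle, gfp);
}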
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
					 dma_handle);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        31      100.00%  1        100.00%
Total                  31      100.00%  1        100.00%


static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Dmitry Baryshkov       72      72.00%   1        33.33%
Marek Szyprowski       24      24.00%   1        33.33%
Vladimir Murzin        4       4.00%    1        33.33%
Total                  100     100.00%  3        100.00%

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        37      100.00%  1        100.00%
Total                  37      100.00%  1        100.00%

EXPORT_SYMBOL(dma_release_from_dev_coherent);
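
The matching free path, sketched under the same assumptions as the allocation example above; example_dma_free() and generic_dma_free() are hypothetical names.

#include <linux/dma-mapping.h>

/* Hypothetical generic free path, mirroring generic_dma_alloc() above. */
extern void generic_dma_free(struct device *dev, size_t size, void *vaddr,
			     dma_addr_t dma_handle);

static void example_dma_free(struct device *dev, size_t size, void *vaddr,
			     dma_addr_t dma_handle)
{
	int order = get_order(size);

	/* Returns 1 when the buffer belonged to the per-device pool and
	 * has been returned to its bitmap; nothing more to do then. */
	if (dma_release_from_dev_coherent(dev, order, vaddr))
		return;

	generic_dma_free(dev, size, vaddr, dma_handle);
}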
int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        30      100.00%  1        100.00%
Total                  30      100.00%  1        100.00%


static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       145     92.95%   1        20.00%
Vladimir Murzin        4       2.56%    1        20.00%
George G. Davis        3       1.92%    1        20.00%
Muhammad Falak R Wani  3       1.92%    1        20.00%
Björn Helgaas          1       0.64%    1        20.00%
Total                  156     100.00%  5        100.00%

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        50      100.00%  1        100.00%
Total                  50      100.00%  1        100.00%

EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
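
And the mmap counterpart, again as a sketch with hypothetical wrapper names (example_dma_mmap(), generic_dma_mmap()) standing in for an arch's dma_map_ops hook.

#include <linux/dma-mapping.h>

/* Hypothetical generic mmap path for buffers outside the pool. */
extern int generic_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr, size_t size);

static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	/* Returns 1 when the buffer came from the per-device pool; ret
	 * then holds the remap_pfn_range() result for that mapping. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return generic_dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}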
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        43      100.00%  1        100.00%
Total                  43      100.00%  1        100.00%

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
				&rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       89      82.41%   1        25.00%
Arnd Bergmann          13      12.04%   2        50.00%
Vladimir Murzin        6       5.56%    1        25.00%
Total                  108     100.00%  4        100.00%


static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       22      84.62%   1        50.00%
Vladimir Murzin        4       15.38%   1        50.00%
Total                  26      100.00%  2        100.00%

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};
static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       93      79.49%   1        50.00%
Vladimir Murzin        24      20.51%   1        50.00%
Total                  117     100.00%  2        100.00%


static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating an error from
	 * dma_assign_coherent_memory() for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);
	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Vladimir Murzin        66      100.00%  1        100.00%
Total                  66      100.00%  1        100.00%

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
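
For context, a sketch of the kind of device-tree node that rmem_dma_setup() matches, kept inside a C comment since it is DTS rather than C; the label, unit address, and size are hypothetical.

/*
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@58000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x58000000 0x400000>;	// 4 MiB, made up
 *			no-map;		// required here on ARM, see above
 *		};
 *	};
 *
 * A device node claims the pool with "memory-region = <&dma_pool>;", which
 * leads the OF reserved-memory core to call rmem_dma_device_init() for
 * that device.
 */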

Overall Contributors

Person                 Tokens  Prop     Commits  CommitProp
Marek Szyprowski       636     36.41%   2        8.33%
Vladimir Murzin        492     28.16%   3        12.50%
Dmitry Baryshkov       448     25.64%   1        4.17%
Arnd Bergmann          47      2.69%    2        8.33%
Brian Starkey          26      1.49%    1        4.17%
Paul Mundt             17      0.97%    1        4.17%
Christoph Hellwig      17      0.97%    1        4.17%
Johannes Weiner        11      0.63%    1        4.17%
Bastian Hecht          10      0.57%    1        4.17%
Andrew Morton          9       0.52%    1        4.17%
Ingo Molnar            8       0.46%    1        4.17%
Björn Helgaas          6       0.34%    1        4.17%
Adrian McMenamin       4       0.23%    1        4.17%
Paul Gortmaker         3       0.17%    1        4.17%
Tejun Heo              3       0.17%    1        4.17%
Muhammad Falak R Wani  3       0.17%    1        4.17%
George G. Davis        3       0.17%    1        4.17%
Jan Beulich            2       0.11%    1        4.17%
Greg Kroah-Hartman     1       0.06%    1        4.17%
Marin Mitov            1       0.06%    1        4.17%
Total                  1747    100.00%  24       100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.