cregit-Linux how code gets into the kernel

Release 4.11 drivers/media/v4l2-core/videobuf2-dma-contig.c

/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>


/* Per-buffer state shared by the MMAP, USERPTR and DMABUF code paths. */
struct vb2_dc_buf {
	struct device			*dev;		/* device used for DMA mapping */
	void				*vaddr;		/* kernel mapping, NULL if none */
	unsigned long			size;		/* buffer size in bytes */
	void				*cookie;	/* cookie from dma_alloc_attrs() */
	dma_addr_t			dma_addr;	/* bus address of the buffer */
	unsigned long			attrs;		/* DMA_ATTR_* allocation flags */
	enum dma_data_direction		dma_dir;	/* direction of DMA transfers */
	struct sg_table			*dma_sgt;	/* sg table (USERPTR/DMABUF) */
	struct frame_vector		*vec;		/* pinned user pages (USERPTR) */

	/* MMAP related */
	struct vb2_vmarea_handler	handler;	/* vma refcount helper */
	atomic_t			refcount;	/* number of buffer users */
	struct sg_table			*sgt_base;	/* cached sg table for export */

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;	/* attachment when importing */
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/


/*
 * Return how many bytes at the start of a mapped scatterlist are
 * contiguous in the DMA address space. Walks entries until the first
 * gap between one entry's end and the next entry's start address.
 */
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *sg;
	dma_addr_t next_addr = sg_dma_address(sgt->sgl);
	unsigned long contig = 0;
	unsigned int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (sg_dma_address(sg) != next_addr)
			break;
		next_addr = sg_dma_address(sg) + sg_dma_len(sg);
		contig += sg_dma_len(sg);
	}

	return contig;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski81100.00%1100.00%
Total81100.00%1100.00%

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/
/* Allocator cookie: a pointer to the buffer's DMA (bus) address. */
static void *vb2_dc_cookie(void *buf_priv)
{
	return &((struct vb2_dc_buf *)buf_priv)->dma_addr;
}

Contributors

PersonTokensPropCommitsCommitProp
Laurent Pinchart24100.00%1100.00%
Total24100.00%1100.00%


/*
 * Return a kernel virtual address for the buffer. For an imported dmabuf
 * the mapping is created lazily via dma_buf_vmap() on first use.
 */
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

Contributors

PersonTokensPropCommitsCommitProp
Philipp Zabel2451.06%150.00%
Laurent Pinchart2348.94%150.00%
Total47100.00%2100.00%


/* Report how many users currently hold a reference to this buffer. */
static unsigned int vb2_dc_num_users(void *buf_priv)
{
	return atomic_read(&((struct vb2_dc_buf *)buf_priv)->refcount);
}

Contributors

PersonTokensPropCommitsCommitProp
Laurent Pinchart27100.00%1100.00%
Total27100.00%1100.00%


/* Sync the buffer for device access before a transfer starts. */
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* Nothing to sync without an sg table. */
	if (!sgt)
		return;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski5089.29%133.33%
Sumit Semwal58.93%133.33%
Tiffany Lin11.79%133.33%
Total56100.00%3100.00%


/* Sync the buffer back for CPU access after a transfer completes. */
static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* Nothing to sync without an sg table. */
	if (!sgt)
		return;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski5089.29%133.33%
Sumit Semwal58.93%133.33%
Tiffany Lin11.79%133.33%
Total56100.00%3100.00%

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
/*
 * Drop one reference to an MMAP buffer; on the last put, free the cached
 * export sg table, the DMA allocation, the device reference and the
 * buffer state itself.
 */
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	/* Free the sg table cached for dmabuf export, if one was created. */
	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	/* Balances the get_device() taken in vb2_dc_alloc(). */
	put_device(buf->dev);
	kfree(buf);
}

Contributors

PersonTokensPropCommitsCommitProp
Laurent Pinchart4451.16%233.33%
Tomasz Stanislawski2933.72%233.33%
Pawel Osciak78.14%116.67%
Tomasz Figa66.98%116.67%
Total86100.00%6100.00%


/*
 * Allocate a DMA-contiguous MMAP buffer of @size bytes on @dev.
 * Returns the new vb2_dc_buf (with one reference held) or an ERR_PTR.
 */
static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
	unsigned long size, enum dma_data_direction dma_dir,
	gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Without DMA_ATTR_NO_KERNEL_MAPPING the cookie is a kernel address. */
	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	/* Hook up the common vma handler so mmap()ed vmas hold references. */
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski12759.35%218.18%
Hans Verkuil3918.22%545.45%
Tomasz Figa2712.62%19.09%
Pawel Osciak125.61%19.09%
Krzysztof Kozlowski83.74%19.09%
Laurent Pinchart10.47%19.09%
Total214100.00%11100.00%


/*
 * Map an MMAP buffer into a userspace vma. The whole buffer is always
 * mapped; the vma's original pgoff is ignored.
 */
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
			     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	/* Take a buffer reference through the common vma open handler. */
	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski14596.03%150.00%
Tomasz Figa63.97%150.00%
Total151100.00%2100.00%

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

/*
 * Per-attachment state: a private copy of the buffer's sg table plus the
 * direction it is currently mapped in (DMA_NONE when unmapped).
 */
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};
/*
 * Exporter attach callback: allocate per-attachment state holding a
 * private copy of the buffer's base scatterlist, so the same buffer can
 * be attached by several importers at once. The copy is left unmapped
 * (dma_dir == DMA_NONE) until vb2_dc_dmabuf_ops_map() is called.
 */
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	/* Clone page/length/offset of every entry into the new table. */
	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski19299.48%150.00%
Hans Verkuil10.52%150.00%
Total193100.00%2100.00%


/*
 * Exporter detach callback: unmap the attachment's private sg table if it
 * is still mapped, then free the table and the attachment state.
 */
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski8597.70%150.00%
Hans Verkuil22.30%150.00%
Total87100.00%2100.00%


/*
 * Exporter map callback: map the attachment's private sg table for the
 * importing device in the requested direction. A previous mapping in the
 * same direction is reused as-is; a mapping in a different direction is
 * unmapped first. The dmabuf's own mutex serializes map/unmap calls.
 */
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski15790.75%133.33%
Hans Verkuil95.20%133.33%
Ricardo Ribalda Delgado74.05%133.33%
Total173100.00%3100.00%


/*
 * Intentionally a no-op: mappings are cached in the attachment and torn
 * down in vb2_dc_dmabuf_ops_map() (on direction change) or at detach time.
 */
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski2095.24%150.00%
Hans Verkuil14.76%150.00%
Total21100.00%2100.00%


static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* Drops the reference that vb2_dc_get_dmabuf() took on the buffer. */
	vb2_dc_put(dbuf->priv);
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski19100.00%1100.00%
Total19100.00%1100.00%


/* Return the kernel address of page @pgnum, or NULL without a mapping. */
static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	if (!buf->vaddr)
		return NULL;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski3485.00%150.00%
Tomasz Figa615.00%150.00%
Total40100.00%2100.00%


/* Return the exporter's kernel mapping of the whole buffer (may be NULL). */
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	return ((struct vb2_dc_buf *)dbuf->priv)->vaddr;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski26100.00%1100.00%
Total26100.00%1100.00%


/* Map the exported buffer into userspace by delegating to vb2_dc_mmap(). */
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski26100.00%1100.00%
Total26100.00%1100.00%

static struct dma_buf_ops vb2_dc_dmabuf_ops = { .attach = vb2_dc_dmabuf_ops_attach, .detach = vb2_dc_dmabuf_ops_detach, .map_dma_buf = vb2_dc_dmabuf_ops_map, .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap, .kmap = vb2_dc_dmabuf_ops_kmap, .kmap_atomic = vb2_dc_dmabuf_ops_kmap, .vmap = vb2_dc_dmabuf_ops_vmap, .mmap = vb2_dc_dmabuf_ops_mmap, .release = vb2_dc_dmabuf_ops_release, };
/*
 * Build an sg table describing the DMA allocation, for use as the base of
 * per-attachment copies when exporting. Returns NULL on failure.
 */
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski5550.93%125.00%
Pawel Osciak4642.59%125.00%
Tomasz Figa65.56%125.00%
Marek Szyprowski10.93%125.00%
Total108100.00%4100.00%


/*
 * Export an MMAP buffer as a dma_buf. The base sg table is built lazily
 * on first export and cached in buf->sgt_base. On success an extra
 * reference is taken on the vb2 buffer, dropped again in
 * vb2_dc_dmabuf_ops_release(). Returns NULL on failure.
 */
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski3932.77%120.00%
Sumit Semwal3327.73%120.00%
Pawel Osciak2823.53%120.00%
Marek Szyprowski1512.61%120.00%
Philipp Zabel43.36%120.00%
Total119100.00%5100.00%

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/
/*
 * Release a USERPTR buffer: unmap the scatterlist (if one was built),
 * mark the pinned pages dirty, drop the page pins and free the state.
 */
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		/* The device may have written to the pages; tell the VM. */
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

Contributors

PersonTokensPropCommitsCommitProp
Jan Kara7255.81%120.00%
Marek Szyprowski3124.03%120.00%
Tomasz Stanislawski1914.73%120.00%
Pawel Osciak64.65%120.00%
Krzysztof Kozlowski10.78%120.00%
Total129100.00%5100.00%

/*
 * For some kind of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address or at the last resort just assume that
 * dma address == physical address (like it has been assumed in earlier
 * versions of videobuf2-dma-contig).
 */
#ifdef __arch_pfn_to_dma
/* Translate a pfn to a DMA address via the architecture-provided helper. */
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev,
					   unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski27100.00%1100.00%
Total27100.00%1100.00%

#elif defined(__pfn_to_bus)
/* Translate a pfn to a bus address via __pfn_to_bus(). */
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev,
					   unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski25100.00%1100.00%
Total25100.00%1100.00%

#elif defined(__pfn_to_phys)
/* Translate a pfn to a physical address via __pfn_to_phys(). */
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev,
					   unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski25100.00%1100.00%
Total25100.00%1100.00%

#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev,
					   unsigned long pfn)
{
	/* Last resort: assume dma address == physical address. */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski27100.00%1100.00%
Total27100.00%1100.00%

#endif
/*
 * Pin a user-space memory range for DMA. Pages are pinned via a frame
 * vector and mapped through an sg table; if the range has no struct
 * pages (e.g. reserved memory), fall back to direct pfn-to-dma
 * translation for physically contiguous ranges. The final mapping must
 * be contiguous in DMA address space. Returns a vb2_dc_buf or ERR_PTR.
 */
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	/* Pin the user pages; write access only needed for captures. */
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	/* The allocator only supports DMA-contiguous mappings. */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski28955.15%19.09%
Jan Kara11822.52%218.18%
Marek Szyprowski7113.55%218.18%
Hans Verkuil264.96%436.36%
Pawel Osciak183.44%19.09%
Krzysztof Kozlowski20.38%19.09%
Total524100.00%11100.00%

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
/*
 * Pin an imported dmabuf: map the attachment and verify the resulting
 * scatterlist is DMA-contiguous and large enough for the buffer.
 */
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	/* Reset cached vmap; vb2_dc_vaddr() will re-vmap on demand. */
	buf->vaddr = NULL;

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Sumit Semwal16395.88%133.33%
Philipp Zabel63.53%133.33%
Colin Cross10.59%133.33%
Total170100.00%3100.00%


/* Unpin an imported dmabuf: drop any vmap and unmap the attachment. */
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	/* Release the vmap created lazily by vb2_dc_vaddr(), if any. */
	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Sumit Semwal8575.89%150.00%
Philipp Zabel2724.11%150.00%
Total112100.00%2100.00%


/* Detach from an imported dmabuf and free the buffer state. */
static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* vb2 should never detach a buffer that is still mapped. */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

Contributors

PersonTokensPropCommitsCommitProp
Sumit Semwal51100.00%1100.00%
Total51100.00%1100.00%


static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf, unsigned long size, enum dma_data_direction dma_dir) { struct vb2_dc_buf *buf; struct dma_buf_attachment *dba; if (dbuf->size < size) return ERR_PTR(-EFAULT); if (WARN_ON(!dev)) return ERR_PTR(-EINVAL); buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); buf->dev = dev; /* create attachment for the dmabuf with the user device */ dba = dma_buf_attach(dbuf, buf->dev); if (IS_ERR(dba)) { pr_err("failed to attach dmabuf\n"); kfree(buf); return dba; } buf->dma_dir = dma_dir; buf->size = size; buf->db_attach = dba; return buf; }

Contributors

PersonTokensPropCommitsCommitProp
Sumit Semwal12985.43%125.00%
Hans Verkuil2214.57%375.00%
Total151100.00%4100.00%

/*********************************************/ /* DMA CONTIG exported functions */ /*********************************************/ const struct vb2_mem_ops vb2_dma_contig_memops = { .alloc = vb2_dc_alloc, .put = vb2_dc_put, .get_dmabuf = vb2_dc_get_dmabuf, .cookie = vb2_dc_cookie, .vaddr = vb2_dc_vaddr, .mmap = vb2_dc_mmap, .get_userptr = vb2_dc_get_userptr, .put_userptr = vb2_dc_put_userptr, .prepare = vb2_dc_prepare, .finish = vb2_dc_finish, .map_dmabuf = vb2_dc_map_dmabuf, .unmap_dmabuf = vb2_dc_unmap_dmabuf, .attach_dmabuf = vb2_dc_attach_dmabuf, .detach_dmabuf = vb2_dc_detach_dmabuf, .num_users = vb2_dc_num_users, }; EXPORT_SYMBOL_GPL(vb2_dma_contig_memops); /** * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size * @dev: device for configuring DMA parameters * @size: size of DMA max segment size to set * * To allow mapping the scatter-list into a single chunk in the DMA * address space, the device is required to have the DMA max segment * size parameter set to a value larger than the buffer size. Otherwise, * the DMA-mapping subsystem will split the mapping into max segment * size chunks. This function sets the DMA max segment size * parameter to let DMA-mapping map a buffer as a single chunk in DMA * address space. * This code assumes that the DMA-mapping subsystem will merge all * scatterlist segments if this is really possible (for example when * an IOMMU is available and enabled). * Ideally, this parameter should be set by the generic bus code, but it * is left with the default 64KiB value due to historical litmiations in * other subsystems (like limited USB host drivers) and there no good * place to set it to the proper value. * This function should be called from the drivers, which are known to * operate on platforms with IOMMU and provide access to shared buffers * (either USERPTR or DMABUF). This should be done before initializing * videobuf2 queue. */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	/*
	 * Allocate dma_parms on first use; it is freed again by
	 * vb2_dma_contig_clear_max_seg_size().
	 */
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	/* Only ever raise the limit; never shrink a larger existing one. */
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski7098.59%150.00%
Vincent Stehlé11.41%150.00%
Total71100.00%2100.00%

EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev: device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	/*
	 * NOTE(review): dev->dma_parms is freed unconditionally here. If the
	 * bus/platform code ever set dma_parms itself (rather than via
	 * vb2_dma_contig_set_max_seg_size()), this would free memory the
	 * driver does not own — confirm callers only pair this with the
	 * setter above.
	 */
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Marek Szyprowski23100.00%1100.00%
Total23100.00%1100.00%

EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");

Overall Contributors

PersonTokensPropCommitsCommitProp
Tomasz Stanislawski144045.01%411.43%
Sumit Semwal50315.72%25.71%
Marek Szyprowski45714.29%617.14%
Pawel Osciak2126.63%25.71%
Jan Kara1956.10%25.71%
Laurent Pinchart1454.53%25.71%
Hans Verkuil1013.16%720.00%
Philipp Zabel611.91%25.71%
Tomasz Figa571.78%12.86%
Krzysztof Kozlowski130.41%12.86%
Ricardo Ribalda Delgado70.22%12.86%
H Hartley Sweeten30.09%12.86%
Tiffany Lin20.06%12.86%
Junghak Sung10.03%12.86%
Vincent Stehlé10.03%12.86%
Colin Cross10.03%12.86%
Total3199100.00%35100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.