Release 4.7: drivers/gpu/drm/msm/msm_gem.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 54 | 100.00% | 1 | 100.00% |
| Total | 54 | 100.00% | 1 | 100.00% |
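To make the address math above concrete, here is a hedged sketch with hypothetical values (not from the driver): drm_mm node offsets are in units of pages, so physaddr() scales the node start by PAGE_SHIFT and adds the carveout base.

/* Sketch of physaddr() arithmetic with assumed example values:
 * carveout base (priv->vram.paddr) at 0x80000000, node placed at
 * page 16 of the carveout, 4K pages (PAGE_SHIFT == 12).
 */
dma_addr_t vram_paddr = 0x80000000;	/* priv->vram.paddr */
unsigned long node_start = 16;		/* vram_node->start, in pages */
dma_addr_t paddr = ((dma_addr_t)node_start << PAGE_SHIFT) + vram_paddr;
/* paddr == 0x80010000; page i of the object lives at paddr + i * PAGE_SIZE */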
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 27 | 100.00% | 1 | 100.00% |
| Total | 27 | 100.00% | 1 | 100.00% |
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;
	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}
	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}
	return p;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 156 | 100.00% | 1 | 100.00% |
| Total | 156 | 100.00% | 1 | 100.00% |
/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;
		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);
		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}
		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}
		msm_obj->pages = p;
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
	return msm_obj->pages;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 184 | 96.34% | 3 | 75.00% |
| wei yongjun | 7 | 3.66% | 1 | 25.00% |
| Total | 191 | 100.00% | 4 | 100.00% |
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);
		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}
		msm_obj->pages = NULL;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 113 | 92.62% | 3 | 75.00% |
| micah richert | 9 | 7.38% | 1 | 25.00% |
| Total | 122 | 100.00% | 4 | 100.00% |
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 54 | 100.00% | 1 | 100.00% |
| Total | 54 | 100.00% | 1 | 100.00% |
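A minimal, hypothetical caller sketch of the pin API above (fragment; 'obj' is assumed). Note that msm_gem_put_pages() below is still a stub, so only the get side does real work at this point:

	struct page **pages = msm_gem_get_pages(obj);	/* pins + builds sgt on first use */
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	/* ... access pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ... */
	msm_gem_put_pages(obj);				/* currently a no-op, see below */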
void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, do something here */
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 133 | 100.00% | 1 | 100.00% |
| Total | 133 | 100.00% | 1 | 100.00% |
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}
	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 53 | 100.00% | 1 | 100.00% |
| Total | 53 | 100.00% | 1 | 100.00% |
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;
	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;
	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}
	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	pfn = page_to_pfn(pages[pgoff]);
	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 207 | 97.64% | 2 | 66.67% |
| dan williams | 5 | 2.36% | 1 | 33.33% |
| Total | 212 | 100.00% | 3 | 100.00% |
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}
	return drm_vma_node_offset_addr(&obj->vma_node);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 70 | 100.00% | 1 | 100.00% |
| Total | 70 | 100.00% | 1 | 100.00% |
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 43 | 100.00% | 2 | 100.00% |
| Total | 43 | 100.00% | 2 | 100.00% |
/* Should be called under struct_mutex, although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means that when unpinning support is eventually added, the
 * refcnt counter will need to be an atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;
	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;
			if (WARN_ON(!mmu))
				return -EINVAL;
			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}
	if (!ret)
		*iova = msm_obj->domain[id].iova;
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 201 | 100.00% | 3 | 100.00% |
| Total | 201 | 100.00% | 3 | 100.00% |
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;
	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 93 | 100.00% | 2 | 100.00% |
| Total | 93 | 100.00% | 2 | 100.00% |
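A hedged usage sketch (hypothetical caller fragment, not from this file): 'id' selects the per-MMU domain, and the put side is still a stub (see msm_gem_put_iova() below):

	uint32_t iova;
	int ret = msm_gem_get_iova(obj, id, &iova);	/* id indexes priv->mmus[] */
	if (ret)
		return ret;
	/* ... emit iova into the command stream ... */
	msm_gem_put_iova(obj, id);			/* no-op for now */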
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 46 | 100.00% | 1 | 100.00% |
| Total | 46 | 100.00% | 1 | 100.00% |
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 19 | 100.00% | 1 | 100.00% |
| Total | 19 | 100.00% | 1 | 100.00% |
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 71 | 100.00% | 1 | 100.00% |
| Total | 71 | 100.00% | 1 | 100.00% |
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;
	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}
	*offset = msm_gem_mmap_offset(obj);
	drm_gem_object_unreference_unlocked(obj);
fail:
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 76 | 100.00% | 1 | 100.00% |
| Total | 76 | 100.00% | 1 | 100.00% |
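The two dumb-buffer entry points above back the standard DRM userspace flow. A hedged userspace sketch, assuming an open DRM fd and eliding error details: CREATE_DUMB lands in msm_gem_dumb_create(), MAP_DUMB in msm_gem_dumb_map_offset(), and the subsequent mmap() faults pages in through msm_gem_fault().

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *map_dumb_bo(int fd, uint32_t width, uint32_t height)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;
	/* map.offset is the fake offset from drm_vma_node_offset_addr() */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, map.offset);
}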
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	return msm_obj->vaddr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 110 | 100.00% | 2 | 100.00% |
| Total | 110 | 100.00% | 2 | 100.00% |
void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 45 | 100.00% | 1 | 100.00% |
| Total | 45 | 100.00% | 1 | 100.00% |
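A hedged kernel-side sketch of CPU access through the vmap (hypothetical caller fragment; 'cmds' and 'len' are illustrative names):

	void *vaddr = msm_gem_vaddr(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	memcpy(vaddr, cmds, len);	/* write-combined: fine for streaming
					 * writes, slow for CPU reads */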
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;
	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}
	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}
	if (!exclusive || !fobj)
		return 0;
	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 211 | 100.00% | 1 | 100.00% |
| Total | 211 | 100.00% | 1 | 100.00% |
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 83 | 100.00% | 3 | 100.00% |
| Total | 83 | 100.00% | 3 | 100.00% |
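For context, a hedged sketch of how the submit path is expected to pair the two calls above (fragment modeled on msm_gem_submit.c; 'gpu->fctx', 'write', and 'submit_fence' are assumptions here, not names from this file):

	/* before queuing: wait out foreign fences, reserve a shared slot */
	ret = msm_gem_sync_object(obj, gpu->fctx, write);
	if (ret)
		return ret;
	/* after the submit's fence exists: attach it and mark the BO active */
	msm_gem_move_to_active(obj, gpu, write, submit_fence);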
void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 77 | 100.00% | 1 | 100.00% |
| Total | 77 | 100.00% | 1 | 100.00% |
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;
		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}
	/* TODO cache maintenance */
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 105 | 99.06% | 8 | 88.89% |
| wentao xu | 1 | 0.94% | 1 | 11.11% |
| Total | 106 | 100.00% | 9 | 100.00% |
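A hedged sketch of the prep/fini bracket around CPU access (hypothetical caller fragment; the 100 ms deadline is illustrative). With MSM_PREP_NOSYNC the call degenerates to a non-blocking busy check:

	ktime_t deadline = ktime_add_ms(ktime_get(), 100);	/* absolute deadline */
	int ret = msm_gem_cpu_prep(obj, MSM_PREP_WRITE, &deadline);
	if (ret)
		return ret;	/* -EBUSY / -ETIMEDOUT: GPU still owns the buffer */
	/* ... CPU reads/writes ... */
	msm_gem_cpu_fini(obj);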
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 14 | 100.00% | 2 | 100.00% |
| Total | 14 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 60 | 100.00% | 1 | 100.00% |
| Total | 60 | 100.00% | 1 | 100.00% |
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}
	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 200 | 100.00% | 3 | 100.00% |
| Total | 200 | 100.00% | 3 | 100.00% |
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 84 | 100.00% | 1 | 100.00% |
| Total | 84 | 100.00% | 1 | 100.00% |
#endif
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));
	list_del(&msm_obj->mm_list);
	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}
	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);
		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}
	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);
	drm_gem_object_release(obj);
	kfree(msm_obj);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 234 | 96.30% | 5 | 83.33% |
| jilai wang | 9 | 3.70% | 1 | 16.67% |
| Total | 243 | 100.00% | 6 | 100.00% |
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	obj = msm_gem_new(dev, size, flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	ret = drm_gem_handle_create(file, obj, handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 102 | 100.00% | 1 | 100.00% |
| Total | 102 | 100.00% | 1 | 100.00% |
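From userspace this is reached through the MSM-specific allocation ioctl; a hedged sketch, assuming the uapi header and an open msm DRM fd:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int alloc_bo(int fd, uint32_t *handle)
{
	struct drm_msm_gem_new req = { .size = 4096, .flags = MSM_BO_WC };

	if (ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req))	/* -> msm_gem_new_handle() */
		return -1;
	*handle = req.handle;	/* handle now names the BO for this fd */
	return 0;
}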
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;
	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}
	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;
	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;
	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);
	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;
	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];
	msm_obj->flags = flags;
	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}
	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	*obj = &msm_obj->base;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 263 | 100.00% | 6 | 100.00% |
| Total | 263 | 100.00% | 6 | 100.00% |
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	size = PAGE_ALIGN(size);
	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;
	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}
	return obj;
fail:
	if (obj)
		drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 129 | 100.00% | 5 | 100.00% |
| Total | 129 | 100.00% | 5 | 100.00% |
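A hedged kernel-internal usage sketch: msm_gem_new() WARNs unless struct_mutex is held, so a caller looks roughly like the locking in msm_gem_new_handle() above (fragment; 'dev' and 'size' assumed):

	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);
	obj = msm_gem_new(dev, size, MSM_BO_WC);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(obj))
		return PTR_ERR(obj);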
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;
	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}
	size = PAGE_ALIGN(dmabuf->size);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;
	drm_gem_private_object_init(dev, obj, size);
	npages = size / PAGE_SIZE;
	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;
	return obj;
fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 205 | 100.00% | 4 | 100.00% |
| Total | 205 | 100.00% | 4 | 100.00% |
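Userspace reaches this through PRIME import; a hedged sketch using libdrm (the kernel side goes dma-buf fd -> drm_gem_prime_fd_to_handle() -> msm_gem_import()):

#include <stdint.h>
#include <xf86drm.h>	/* libdrm */

static int import_dmabuf(int drm_fd, int dmabuf_fd, uint32_t *handle)
{
	/* kernel path: drm_gem_prime_fd_to_handle() -> msm_gem_import() */
	return drmPrimeFDToHandle(drm_fd, dmabuf_fd, handle);
}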
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rob clark | 3571 | 99.06% | 21 | 80.77% |
| micah richert | 9 | 0.25% | 1 | 3.85% |
| jilai wang | 9 | 0.25% | 1 | 3.85% |
| dan williams | 8 | 0.22% | 1 | 3.85% |
| wei yongjun | 7 | 0.19% | 1 | 3.85% |
| wentao xu | 1 | 0.03% | 1 | 3.85% |
| Total | 3605 | 100.00% | 26 | 100.00% |
  