Release 4.7: drivers/gpu/drm/udl/udl_gem.c

/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
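
/*
 * Allocate a udl_gem_object of the given size and initialize its
 * embedded GEM object.  New objects default to cacheable CPU mappings
 * (UDL_BO_CACHEABLE); see update_vm_cache_attr() below.
 */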
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}
	obj->flags = UDL_BO_CACHEABLE;
	return obj;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 71 | 92.21% | 1 | 50.00% |
| haixia shi | 6 | 7.79% | 1 | 50.00% |
| Total | 77 | 100.00% | 2 | 100.00% |
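
/*
 * Create a GEM object of @size bytes (rounded up to page granularity)
 * and return a userspace handle to it in @handle_p.  The local
 * reference is dropped once the handle holds the object.
 */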
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;
	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}
	drm_gem_object_unreference_unlocked(&obj->base);
	*handle_p = handle;
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 114 | 99.13% | 1 | 50.00% |
| daniel vetter | 1 | 0.87% | 1 | 50.00% |
| Total | 115 | 100.00% | 2 | 100.00% |
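
/*
 * Choose vma->vm_page_prot from the buffer's caching flags: cacheable,
 * write-combined, or (the default) non-cached.
 */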
static void update_vm_cache_attr(struct udl_gem_object *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
	/* non-cacheable as default. */
	if (obj->flags & UDL_BO_CACHEABLE) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	} else if (obj->flags & UDL_BO_WC) {
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	}
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| haixia shi | 89 | 100.00% | 1 | 100.00% |
| Total | 89 | 100.00% | 1 | 100.00% |
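
/*
 * Entry point for DRM_IOCTL_MODE_CREATE_DUMB: derive pitch and size
 * from the requested width/height/bpp, then create the backing GEM
 * object.
 */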
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 62 | 93.94% | 1 | 50.00% |
| david herrmann | 4 | 6.06% | 1 | 50.00% |
| Total | 66 | 100.00% | 2 | 100.00% |
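
/*
 * fops .mmap handler: drm_gem_mmap() validates the fake offset and
 * sets up the vma; the vma is then switched to VM_MIXEDMAP, since
 * pages are inserted individually from the fault handler, and the
 * object's caching attributes are applied.
 */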
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
	return ret;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| konstantin khlebnikov | 50 | 80.65% | 1 | 50.00% |
| haixia shi | 12 | 19.35% | 1 | 50.00% |
| Total | 62 | 100.00% | 2 | 100.00% |
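
/*
 * Page fault handler for mmap'ed objects: translate the faulting
 * address into an index into obj->pages and insert that page into the
 * vma.  The backing pages must already be pinned; otherwise the fault
 * is answered with SIGBUS.
 */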
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;
	if (!obj->pages)
		return VM_FAULT_SIGBUS;
	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 126 | 100.00% | 1 | 100.00% |
| Total | 126 | 100.00% | 1 | 100.00% |
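
/*
 * Pin the object's backing pages from shmem via drm_gem_get_pages().
 * A no-op if the pages are already pinned.
 */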
int udl_gem_get_pages(struct udl_gem_object *obj)
{
	struct page **pages;

	if (obj->pages)
		return 0;
	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	obj->pages = pages;
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 48 | 84.21% | 1 | 33.33% |
| rob clark | 8 | 14.04% | 1 | 33.33% |
| al viro | 1 | 1.75% | 1 | 33.33% |
| Total | 57 | 100.00% | 3 | 100.00% |
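
/*
 * Release the backing pages.  For prime-imported objects only the
 * locally allocated page array is freed; native pages go back through
 * drm_gem_put_pages() without being marked dirty or accessed.
 */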
void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}
	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| rob clark | 32 | 57.14% | 2 | 66.67% |
| dave airlie | 24 | 42.86% | 1 | 33.33% |
| Total | 56 | 100.00% | 3 | 100.00% |
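
/*
 * Map the whole object into kernel address space.  Imported dma-bufs
 * are mapped through dma_buf_vmap(); native objects pin their pages
 * and vmap() them.
 */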
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;
	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 108 | 100.00% | 2 | 100.00% |
| Total | 108 | 100.00% | 2 | 100.00% |
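
/*
 * Undo udl_gem_vmap().  Native objects also drop the page pin taken
 * while mapping.
 */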
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}
	vunmap(obj->vmapping);
	udl_gem_put_pages(obj);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 48 | 100.00% | 2 | 100.00% |
| Total | 48 | 100.00% | 2 | 100.00% |
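
/*
 * Final GEM free callback: tear down any kernel mapping, release prime
 * resources (and the device reference taken at import time), then drop
 * the pages and the mmap offset.
 */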
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);
	if (obj->vmapping)
		udl_gem_vunmap(obj);
	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}
	if (obj->pages)
		udl_gem_put_pages(obj);
	drm_gem_free_mmap_offset(gem_obj);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 73 | 100.00% | 4 | 100.00% |
| Total | 73 | 100.00% | 4 | 100.00% |
/* The dumb interface doesn't work with the straight GEM mmap
   interface; it expects to do mmap on the DRM fd, like normal. */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);
	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;
	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 133 | 94.33% | 1 | 33.33% |
| david herrmann | 5 | 3.55% | 1 | 33.33% |
| dan carpenter | 3 | 2.13% | 1 | 33.33% |
| Total | 141 | 100.00% | 3 | 100.00% |
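
As the comment above udl_gem_mmap() notes, dumb buffers are mapped through
the DRM fd.  A minimal userspace sketch of that flow (hypothetical code,
not part of this file; it assumes a UDL device node at /dev/dri/card0 and
libdrm's drmIoctl() wrapper, and the function name udl_dumb_example is
made up for illustration):

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

int udl_dumb_example(void)
{
	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *fb;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;

	/* Served by udl_dumb_create() -> udl_gem_create() above. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return -1;

	/* Served by udl_gem_mmap(), which returns the fake offset. */
	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return -1;

	/* mmap on the DRM fd goes through udl_drm_gem_mmap(); pages are
	 * then faulted in by udl_gem_fault(). */
	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	if (fb == MAP_FAILED)
		return -1;

	memset(fb, 0, create.size);	/* clear the framebuffer */
	return fd;
}
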
Overall Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| dave airlie | 820 | 79.46% | 4 | 28.57% |
| haixia shi | 107 | 10.37% | 1 | 7.14% |
| konstantin khlebnikov | 50 | 4.84% | 1 | 7.14% |
| rob clark | 40 | 3.88% | 2 | 14.29% |
| david herrmann | 9 | 0.87% | 2 | 14.29% |
| dan carpenter | 3 | 0.29% | 1 | 7.14% |
| daniel vetter | 1 | 0.10% | 1 | 7.14% |
| david howells | 1 | 0.10% | 1 | 7.14% |
| al viro | 1 | 0.10% | 1 | 7.14% |
| Total | 1032 | 100.00% | 14 | 100.00% |