// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 *
 * Long term, we should support evicting pages from the MMU when under
 * memory pressure (thus the refcounting of the backing pages done by
 * the shmem helpers), but that's not a high priority since our systems
 * tend not to have swap.
 */
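
/* For illustration only, a rough userspace sketch of the BO lifecycle
 * exposed by the ioctls below (hypothetical fd, libdrm's drmIoctl(),
 * error handling omitted); this is not part of the driver itself:
 *
 *	struct drm_v3d_create_bo create = { .size = 4096 };
 *	drmIoctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
 *	(create.handle is the GEM handle, create.offset the GPU VA.)
 *
 *	struct drm_v3d_mmap_bo map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
 *	void *vaddr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, map.offset);
 */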

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"

/* Called by the DRM core on the last userspace/kernel unreference of
 * the BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);

	v3d_mmu_remove_ptes(bo);

	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated--;
	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	/* GPU execution may have dirtied any pages in the BO. */
	bo->base.pages_mark_dirty_on_put = true;

	drm_gem_shmem_free_object(obj);
}

static const struct drm_gem_object_funcs v3d_gem_funcs = {
	.free = v3d_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/* gem_create_object function for allocating a BO struct and doing
 * early setup.
 */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
	struct v3d_bo *bo;
	struct drm_gem_object *obj;

	if (size == 0)
		return NULL;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;
	obj = &bo->base.base;

	obj->funcs = &v3d_gem_funcs;

	INIT_LIST_HEAD(&bo->unref_head);

	return &bo->base.base;
}

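/* Finishes BO setup shared between native creation and dma-buf import:
 * gets the backing pages (pinned for the BO's lifetime), reserves the
 * BO's range of GPU address space, and writes its PTEs into the MMU.
 */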
static int
v3d_bo_create_finish(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);
	struct sg_table *sgt;
	int ret;

	/* So far we pin the BO in the MMU for its lifetime, so use
	 * shmem's helper for getting a lifetime sgt.
	 */
	sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	spin_lock(&v3d->mm_lock);
	/* Allocate the object's space in the GPU's page tables.
	 * Inserting PTEs will happen later, but the offset is for the
	 * lifetime of the BO.
	 */
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 obj->size >> PAGE_SHIFT,
					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
	spin_unlock(&v3d->mm_lock);
	if (ret)
		return ret;

	/* Track stats for /debug/dri/n/bo_stats. */
	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated++;
	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	v3d_mmu_insert_ptes(bo);

	return 0;
}

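/* Allocates a shmem-backed BO of at least @unaligned_size bytes and
 * maps it into the GPU's address space.
 */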
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t unaligned_size)
{
	struct drm_gem_shmem_object *shmem_obj;
	struct v3d_bo *bo;
	int ret;

	shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
	if (IS_ERR(shmem_obj))
		return ERR_CAST(shmem_obj);
	bo = to_v3d_bo(&shmem_obj->base);

	ret = v3d_bo_create_finish(&shmem_obj->base);
	if (ret)
		goto free_obj;

	return bo;

free_obj:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ERR_PTR(ret);
}

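/* Imports a dma-buf as a shmem object and gives it the same GPU-side
 * setup (address space allocation and MMU mapping) as a natively
 * created BO.
 */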
struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	ret = v3d_bo_create_finish(obj);
	if (ret) {
		drm_gem_shmem_free_object(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

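/* Userspace entry point for allocating a new BO, returning both the
 * GEM handle and the BO's offset within the GPU's address space.
 */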
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_v3d_create_bo *args = data;
	struct v3d_bo *bo = NULL;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown create_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	args->offset = bo->node.start << PAGE_SHIFT;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

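/* Returns the fake mmap offset of a BO so that userspace can map it
 * through mmap() on the DRM fd.
 */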
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_v3d_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

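/* Returns a BO's offset within the GPU's address space, which
 * userspace needs in order to reference the BO from command lists.
 */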
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_v3d_get_bo_offset *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_v3d_bo(gem_obj);

	args->offset = bo->node.start << PAGE_SHIFT;

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}