Release 4.7: drivers/gpu/drm/ttm/ttm_bo_manager.c
  
  
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */
struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
	unsigned long lpfn;
	int ret;
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
		sflags = DRM_MM_SEARCH_BELOW;
		aflags = DRM_MM_CREATE_TOP;
	}
	spin_lock(&rman->lock);
	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn,
					  sflags, aflags);
	spin_unlock(&rman->lock);
	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}
	return 0;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Ben Skeggs | 129 | 60.56% | 1 | 12.50% |
| David Herrmann | 27 | 12.68% | 1 | 12.50% |
| Lauri Kasanen | 20 | 9.39% | 1 | 12.50% |
| Thomas Hellstrom | 15 | 7.04% | 1 | 12.50% |
| Michel Dänzer | 13 | 6.10% | 2 | 25.00% |
| Christian König | 9 | 4.23% | 2 | 25.00% |
| Total | 213 | 100.00% | 8 | 100.00% |
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);
		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Ben Skeggs | 54 | 72.97% | 1 | 33.33% |
| Thomas Hellstrom | 12 | 16.22% | 1 | 33.33% |
| David Herrmann | 8 | 10.81% | 1 | 33.33% |
| Total | 74 | 100.00% | 3 | 100.00% |
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;
	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Ben Skeggs | 54 | 76.06% | 1 | 50.00% |
| Thomas Hellstrom | 17 | 23.94% | 1 | 50.00% |
| Total | 71 | 100.00% | 2 | 100.00% |
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Ben Skeggs | 62 | 68.13% | 1 | 50.00% |
| Thomas Hellstrom | 29 | 31.87% | 1 | 50.00% |
| Total | 91 | 100.00% | 2 | 100.00% |
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	spin_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	spin_unlock(&rman->lock);
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Ben Skeggs | 42 | 75.00% | 1 | 50.00% |
| Thomas Hellstrom | 14 | 25.00% | 1 | 50.00% |
| Total | 56 | 100.00% | 2 | 100.00% |
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	ttm_bo_man_init,	/* init */
	ttm_bo_man_takedown,	/* takedown */
	ttm_bo_man_get_node,	/* get_node */
	ttm_bo_man_put_node,	/* put_node */
	ttm_bo_man_debug	/* debug */
};
EXPORT_SYMBOL(ttm_bo_manager_func);
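For context, a driver opts into this range manager by pointing a memory type's manager at ttm_bo_manager_func from its init_mem_type callback and then sizing the space with ttm_bo_init_mm(). The sketch below is hypothetical driver glue, not part of ttm_bo_manager.c; the function name, VRAM flags, and caching choices are illustrative assumptions modeled on typical discrete-GPU TTM drivers of this era.

/*
 * Hypothetical driver glue (illustrative only): wire the drm_mm-backed
 * range manager into a VRAM memory type.
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;	/* use the range manager above */
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

The driver would then call ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT), which invokes ttm_bo_man_init() above with the size in pages; every subsequent placement and release of a buffer in that memory type goes through ttm_bo_man_get_node() and ttm_bo_man_put_node().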
Overall Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Ben Skeggs | 380 | 67.50% | 1 | 11.11% |
| Thomas Hellstrom | 102 | 18.12% | 1 | 11.11% |
| David Herrmann | 35 | 6.22% | 1 | 11.11% |
| Lauri Kasanen | 20 | 3.55% | 1 | 11.11% |
| Michel Dänzer | 13 | 2.31% | 2 | 22.22% |
| Christian König | 9 | 1.60% | 2 | 22.22% |
| David Howells | 4 | 0.71% | 1 | 11.11% |
| Total | 563 | 100.00% | 9 | 100.00% |
  