Release 4.11 drivers/gpu/drm/ttm/ttm_bo_manager.c
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
/**
* Currently we use a spinlock for the lock, but a mutex *may* be
* more appropriate to reduce scheduling latency if the range manager
* ends up with very fragmented allocation patterns.
*/
struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn;
	int ret;

	/* An lpfn of 0 means "no upper bound": search the whole range. */
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
	ret = drm_mm_insert_node_in_range(mm, node,
					  mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	/*
	 * Returning 0 with mem->mm_node left NULL is deliberate: it tells
	 * the caller that this range is full, so it can attempt eviction.
	 * A nonzero return is reserved for hard errors such as -ENOMEM.
	 */
	return 0;
}
Contributors (ttm_bo_man_get_node)
Person | Tokens | Token Share | Commits | Commit Share |
Ben Skeggs | 126 | 63.32% | 1 | 12.50% |
David Herrmann | 27 | 13.57% | 1 | 12.50% |
Thomas Hellstrom | 15 | 7.54% | 1 | 12.50% |
Chris Wilson | 10 | 5.03% | 1 | 12.50% |
Lauri Kasanen | 10 | 5.03% | 1 | 12.50% |
Christian König | 9 | 4.52% | 2 | 25.00% |
Michel Dänzer | 2 | 1.01% | 1 | 12.50% |
Total | 199 | 100.00% | 8 | 100.00% |
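Note on the return convention above: ttm_bo_man_get_node() returns 0 even when drm_mm_insert_node_in_range() fails, leaving mem->mm_node NULL. The caller in ttm_bo.c treats "0 with no node" as "this range is full, try eviction or the next placement", while a nonzero return aborts with a hard error. A minimal sketch of the caller side (paraphrased from ttm_bo_mem_space(), not verbatim kernel code):

	ret = (*man->func->get_node)(man, bo, place, mem);
	if (unlikely(ret))
		return ret;	/* hard error, e.g. -ENOMEM */
	if (mem->mm_node)
		break;		/* got space; otherwise try the next placement */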
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);
		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}
Contributors (ttm_bo_man_put_node)
Person | Tokens | Token Share | Commits | Commit Share |
Ben Skeggs | 54 | 72.97% | 1 | 33.33% |
Thomas Hellstrom | 12 | 16.22% | 1 | 33.33% |
David Herrmann | 8 | 10.81% | 1 | 33.33% |
Total | 74 | 100.00% | 3 | 100.00% |
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	/* p_size is the size of the managed range, in pages. */
	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}
Contributors (ttm_bo_man_init)
Person | Tokens | Token Share | Commits | Commit Share |
Ben Skeggs | 54 | 76.06% | 1 | 50.00% |
Thomas Hellstrom | 17 | 23.94% | 1 | 50.00% |
Total | 71 | 100.00% | 2 | 100.00% |
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);

	/* Nodes are still allocated from this range; refuse to tear down. */
	return -EBUSY;
}
Contributors (ttm_bo_man_takedown)
Person | Tokens | Token Share | Commits | Commit Share |
Ben Skeggs | 62 | 68.13% | 1 | 50.00% |
Thomas Hellstrom | 29 | 31.87% | 1 | 50.00% |
Total | 91 | 100.00% | 2 | 100.00% |
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_printer p = drm_debug_printer(prefix);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, &p);
	spin_unlock(&rman->lock);
}
Contributors (ttm_bo_man_debug)
Person | Tokens | Token Share | Commits | Commit Share |
Ben Skeggs | 40 | 60.61% | 1 | 33.33% |
Thomas Hellstrom | 14 | 21.21% | 1 | 33.33% |
Daniel Vetter | 12 | 18.18% | 1 | 33.33% |
Total | 66 | 100.00% | 3 | 100.00% |
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
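A hedged sketch of how a driver typically wires this manager up (modeled on the radeon and nouveau drivers; the mydrv_* names and vram_size are illustrative, not from this file). The driver points a memory type at ttm_bo_manager_func in its init_mem_type hook; ttm_bo_init_mm() then invokes ttm_bo_man_init() with the range size in pages:

static int mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			       struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;	/* the range manager above */
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* At device init time, sized in pages: */
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);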
Overall Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Ben Skeggs | 375 | 65.33% | 1 | 9.09% |
Thomas Hellstrom | 102 | 17.77% | 1 | 9.09% |
David Herrmann | 35 | 6.10% | 1 | 9.09% |
Kees Cook | 15 | 2.61% | 1 | 9.09% |
Daniel Vetter | 12 | 2.09% | 1 | 9.09% |
Chris Wilson | 10 | 1.74% | 1 | 9.09% |
Lauri Kasanen | 10 | 1.74% | 1 | 9.09% |
Christian König | 9 | 1.57% | 2 | 18.18% |
David Howells | 4 | 0.70% | 1 | 9.09% |
Michel Dänzer | 2 | 0.35% | 1 | 9.09% |
Total | 574 | 100.00% | 11 | 100.00% |