Contributors: 7

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Christian König | 60 | 34.68% | 4 | 30.77% |
| xinhui pan | 58 | 33.53% | 1 | 7.69% |
| Dave Airlie | 36 | 20.81% | 4 | 30.77% |
| Thomas Hellstrom | 13 | 7.51% | 1 | 7.69% |
| Ben Skeggs | 4 | 2.31% | 1 | 7.69% |
| Masahiro Yamada | 1 | 0.58% | 1 | 7.69% |
| Dirk Hohndel | 1 | 0.58% | 1 | 7.69% |
| Total | 173 | | 13 | |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */

#ifndef _TTM_RANGE_MANAGER_H_
#define _TTM_RANGE_MANAGER_H_

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/drm_mm.h>

/**
* struct ttm_range_mgr_node
*
* @base: base class we extend
* @mm_nodes: MM nodes, usually 1
*
* Extending the ttm_resource object to manage an address space allocation with
* one or more drm_mm_nodes.
*/
struct ttm_range_mgr_node {
	struct ttm_resource base;
	struct drm_mm_node mm_nodes[];
};
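
Because mm_nodes[] is a flexible array member, the allocation has to be sized for however many nodes the resource will use. A minimal hypothetical sketch follows; example_range_node_alloc, the drm_mm "mm" argument, and the ERR_PTR error handling are assumptions for illustration, not part of this header, and <linux/slab.h> plus <linux/overflow.h> are assumed for kzalloc() and struct_size():

/*
 * Hypothetical helper: allocate a node backed by one drm_mm_node taken
 * from the address-space allocator "mm".
 */
static struct ttm_range_mgr_node *
example_range_node_alloc(struct drm_mm *mm, u64 num_pages)
{
	struct ttm_range_mgr_node *node;
	int ret;

	/* struct_size() accounts for one entry of the flexible array. */
	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	/* Back the resource with an address range from drm_mm. */
	ret = drm_mm_insert_node(mm, &node->mm_nodes[0], num_pages);
	if (ret) {
		kfree(node);
		return ERR_PTR(ret);
	}
	return node;
}

The single allocation keeps the resource and its backing drm_mm_node(s) in one chunk, which is the point of the flexible-array design.
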
/**
* to_ttm_range_mgr_node
*
* @res: the resource to upcast
*
* Upcast the ttm_resource object into a ttm_range_mgr_node object.
*/
static inline struct ttm_range_mgr_node *
to_ttm_range_mgr_node(struct ttm_resource *res)
{
	return container_of(res, struct ttm_range_mgr_node, base);
}
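
Going the other way, a manager callback typically receives only the base ttm_resource and uses to_ttm_range_mgr_node() to recover the container. An illustrative free path; the function name is hypothetical and the locking that drm_mm_remove_node() requires from its caller is omitted:

/*
 * Illustrative counterpart to the alloc sketch above: recover the
 * containing node from the base resource and release it.
 */
static void example_range_node_free(struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);

	if (drm_mm_node_allocated(&node->mm_nodes[0]))
		drm_mm_remove_node(&node->mm_nodes[0]);
	kfree(node);
}
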
int ttm_range_man_init_nocheck(struct ttm_device *bdev,
			       unsigned type, bool use_tt,
			       unsigned long p_size);

int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
			       unsigned type);

static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
					      unsigned int type, bool use_tt,
					      unsigned long p_size)
{
	/* Reject out-of-range memory types at compile time when "type" is constant. */
	BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
	return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
}

static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
					      unsigned int type)
{
	/* Same compile-time type check as ttm_range_man_init(). */
	BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
	return ttm_range_man_fini_nocheck(bdev, type);
}

#endif
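
A hedged usage sketch for the checked wrappers: pair ttm_range_man_init() at device bring-up with ttm_range_man_fini() at teardown. EXAMPLE_PL_VRAM and the example_* hooks are invented for illustration; TTM_PL_VRAM itself comes from <drm/ttm/ttm_placement.h>. Because the memory type is a compile-time constant here, an out-of-range value would trip the BUILD_BUG_ON() above instead of failing at runtime:

#define EXAMPLE_PL_VRAM TTM_PL_VRAM	/* any fixed type < TTM_NUM_MEM_TYPES */

static int example_device_init(struct ttm_device *bdev,
			       unsigned long vram_pages)
{
	/* use_tt == false: the range lives in device memory, not system pages. */
	return ttm_range_man_init(bdev, EXAMPLE_PL_VRAM, false, vram_pages);
}

static void example_device_fini(struct ttm_device *bdev)
{
	ttm_range_man_fini(bdev, EXAMPLE_PL_VRAM);
}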