Contributors: 6
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jani Nikula | 159 | 53.18% | 8 | 61.54% |
| Maarten Lankhorst | 92 | 30.77% | 1 | 7.69% |
| Lucas De Marchi | 17 | 5.69% | 1 | 7.69% |
| Matthew Brost | 16 | 5.35% | 1 | 7.69% |
| Nirmoy Das | 14 | 4.68% | 1 | 7.69% |
| Dave Airlie | 1 | 0.33% | 1 | 7.69% |
| Total | 299 | 100.00% | 13 | 100.00% |
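The listing below is the xe driver's intel_bo display glue. The report does not name the file, but from the includes and function names it is presumably drivers/gpu/drm/xe/display/intel_bo.c: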
```c
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */

#include <drm/drm_gem.h>

#include "xe_bo.h"
#include "intel_bo.h"

bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
	/* legacy tiling is unused */
	return false;
}

bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
	/* xe does not have userptr bos */
	return false;
}

bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
	return false;
}

bool intel_bo_is_protected(struct drm_gem_object *obj)
{
	return false;
}

void intel_bo_flush_if_display(struct drm_gem_object *obj)
{
}

int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	return drm_gem_prime_mmap(obj, vma);
}

int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct ttm_bo_kmap_obj map;
	void *src;
	bool is_iomem;
	int ret;

	ret = xe_bo_lock(bo, true);
	if (ret)
		return ret;

	/* kmap the single page containing the requested offset */
	ret = ttm_bo_kmap(&bo->ttm, offset >> PAGE_SHIFT, 1, &map);
	if (ret)
		goto out_unlock;

	offset &= ~PAGE_MASK;
	src = ttm_kmap_obj_virtual(&map, &is_iomem);
	src += offset;

	/* I/O memory (e.g. VRAM) must be copied with memcpy_fromio() */
	if (is_iomem)
		memcpy_fromio(dst, (void __iomem *)src, size);
	else
		memcpy(dst, src, size);

	ttm_bo_kunmap(&map);
out_unlock:
	xe_bo_unlock(bo);
	return ret;
}
```
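intel_bo_read_from_page() is the one non-trivial function in the file: it takes the BO lock, kmaps only the page containing the offset, and picks memcpy_fromio() or memcpy() depending on whether the mapping is I/O memory. As a hedged usage sketch (the caller and helper name below are assumptions, not from the source), reading a BO's first bytes would look like this; the listing resumes after the sketch:

```c
/*
 * Hypothetical caller, assuming the same includes as the file above:
 * read the first 4 bytes of a buffer object. intel_bo_read_from_page()
 * locks the BO and kmaps one page internally, so the read must fit
 * within a single page from the given offset.
 */
static int read_bo_magic(struct drm_gem_object *obj, u32 *magic)
{
	return intel_bo_read_from_page(obj, 0, magic, sizeof(*magic));
}
```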
```c
struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
{
	return NULL;
}

struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
						   struct intel_frontbuffer *front)
{
	return front;
}

void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
	/* FIXME */
}
```
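The intel_bo_describe() FIXME is still open. A minimal sketch of one possible fill-in, assuming only fields that exist on struct drm_gem_object (an illustration, not the driver's actual plan):

```c
/*
 * Hypothetical fill-in for the FIXME: print the object's size, which
 * every drm_gem_object carries. A real implementation would likely
 * also dump xe_bo placement details.
 */
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
	seq_printf(m, "size=%zu\n", obj->size);
}
```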