Contributors: 12

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jani Nikula | 154 | 40.42% | 10 | 34.48% |
| Ville Syrjälä | 110 | 28.87% | 4 | 13.79% |
| Chris Wilson | 76 | 19.95% | 6 | 20.69% |
| Nirmoy Das | 14 | 3.67% | 1 | 3.45% |
| Maarten Lankhorst | 9 | 2.36% | 1 | 3.45% |
| Daniele Ceraolo Spurio | 7 | 1.84% | 1 | 3.45% |
| Jouni Högander | 4 | 1.05% | 1 | 3.45% |
| Joonas Lahtinen | 2 | 0.52% | 1 | 3.45% |
| José Roberto de Souza | 2 | 0.52% | 1 | 3.45% |
| Matthew Auld | 1 | 0.26% | 1 | 3.45% |
| Dave Airlie | 1 | 0.26% | 1 | 3.45% |
| Paulo Zanoni | 1 | 0.26% | 1 | 3.45% |
| Total | 381 | | 29 | |
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
#include <drm/drm_gem.h>
#include "xe_bo.h"
#include "intel_bo.h"
#include "intel_frontbuffer.h"
bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
	/*
	 * Xe never uses legacy (fence-register) tiling, so every BO
	 * reports as untiled regardless of @obj.
	 */
	return false;
}
bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
	/*
	 * Xe has no userptr BOs at all, so the answer is always "no"
	 * without looking at @obj.
	 */
	return false;
}
bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
	/*
	 * Unconditionally false for xe; @obj is not inspected.
	 * NOTE(review): presumably xe BOs are never shmem-backed the way
	 * i915 ones can be — confirm against the xe BO implementation.
	 */
	return false;
}
bool intel_bo_is_protected(struct drm_gem_object *obj)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	/* Protection status is tracked on the underlying xe BO. */
	return xe_bo_is_protected(bo);
}
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	/*
	 * Framebuffer mmap is delegated straight to the generic PRIME
	 * mmap helper; no xe-specific handling is required here.
	 */
	return drm_gem_prime_mmap(obj, vma);
}
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
	/* Copy @size bytes at @offset into @dst via the xe BO read helper. */
	return xe_bo_read(gem_to_xe_bo(obj), offset, dst, size);
}
/*
 * xe wrapper around the display-common frontbuffer, adding GEM object
 * ownership and its own reference counting.
 */
struct xe_frontbuffer {
	struct intel_frontbuffer base;	/* common frontbuffer state; returned to callers */
	struct drm_gem_object *obj;	/* backing GEM object; reference held by this wrapper */
	struct kref ref;		/* lifetime of this wrapper (not of @obj) */
};
struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj)
{
	struct xe_frontbuffer *fb;

	/*
	 * Allocate a fresh wrapper with refcount 1; returns NULL on
	 * allocation failure.
	 */
	fb = kmalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return NULL;

	intel_frontbuffer_init(&fb->base, obj->dev);
	kref_init(&fb->ref);

	/* The wrapper keeps its own reference on @obj until release. */
	drm_gem_object_get(obj);
	fb->obj = obj;

	return &fb->base;
}
void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
{
struct xe_frontbuffer *front =
container_of(_front, typeof(*front), base);
kref_get(&front->ref);
}
static void frontbuffer_release(struct kref *ref)
{
struct xe_frontbuffer *front =
container_of(ref, typeof(*front), ref);
intel_frontbuffer_fini(&front->base);
drm_gem_object_put(front->obj);
kfree(front);
}
void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
{
struct xe_frontbuffer *front =
container_of(_front, typeof(*front), base);
kref_put(&front->ref, frontbuffer_release);
}
void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
{
	/*
	 * Intentionally empty on xe.
	 * NOTE(review): presumably no explicit CPU-cache flush is needed
	 * for display on xe platforms — confirm against the i915 variant.
	 */
}
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
	/* FIXME: not yet implemented for xe — nothing is printed into @m. */
}