Contributors: 11

Author                    Tokens  Token Proportion  Commits  Commit Proportion
Jocelyn Falempe              402            65.05%        3             13.64%
Jani Nikula                  173            27.99%       10             45.45%
Nirmoy Das                    14             2.27%        1              4.55%
Maarten Lankhorst              9             1.46%        1              4.55%
Daniele Ceraolo Spurio         7             1.13%        1              4.55%
Chris Wilson                   5             0.81%        1              4.55%
Gabriel Krisman Bertazi        3             0.49%        1              4.55%
Jesse Barnes                   2             0.32%        1              4.55%
Paulo Zanoni                   1             0.16%        1              4.55%
Dave Airlie                    1             0.16%        1              4.55%
Matthew Auld                   1             0.16%        1              4.55%
Total                        618                          22


// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */

#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
#include <drm/drm_panic.h>

#include "intel_fb.h"
#include "intel_display_types.h"

#include "xe_bo.h"
#include "intel_bo.h"

bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
	/* legacy tiling is unused */
	return false;
}

bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
	/* xe does not have userptr bos */
	return false;
}

bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
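	/* xe does not have shmem-backed bos */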
	return false;
}

bool intel_bo_is_protected(struct drm_gem_object *obj)
{
	return xe_bo_is_protected(gem_to_xe_bo(obj));
}

void intel_bo_flush_if_display(struct drm_gem_object *obj)
{
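	/* nothing to do for xe */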
}

int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	return drm_gem_prime_mmap(obj, vma);
}

int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	return xe_bo_read(bo, offset, dst, size);
}

struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
{
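	/* xe does not keep a frontbuffer pointer on the GEM object */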
	return NULL;
}

struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
						   struct intel_frontbuffer *front)
{
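	/* nothing to store on the GEM object, hand the frontbuffer back to the caller */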
	return front;
}

void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
	/* FIXME */
}

struct xe_panic_data {
	struct page **pages;
	int page;	/* index of the currently mapped page, -1 if none */
	void *vaddr;	/* kmap address of the currently mapped page */
};

/* xe framebuffer, bundling the panic state with the common intel_framebuffer */
struct xe_framebuffer {
	struct intel_framebuffer base;
	struct xe_panic_data panic;
};

static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
{
	return &container_of_const(fb, struct xe_framebuffer, base)->panic;
}

static void xe_panic_kunmap(struct xe_panic_data *panic)
{
	if (panic->vaddr) {
		/* Flush the written pixels out of the CPU caches before unmapping */
		drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
		kunmap_local(panic->vaddr);
		panic->vaddr = NULL;
	}
}

/*
 * The scanout buffer pages are not mapped, so for each pixel,
 * use ttm_bo_kmap_try_from_panic() to map the page, and write the pixel.
 * Try to keep the mapping from the previous pixel, to avoid too many
 * map/unmap cycles.
 */
static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
				    unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *panic = to_xe_panic_data(fb);
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
	unsigned int new_page;
	unsigned int offset;

	if (fb->panic_tiling)
		offset = fb->panic_tiling(sb->width, x, y);
	else
		offset = y * sb->pitch[0] + x * sb->format->cpp[0];

	new_page = offset >> PAGE_SHIFT;
	offset = offset % PAGE_SIZE;
	if (new_page != panic->page) {
		xe_panic_kunmap(panic);
		panic->page = new_page;
		panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
							  panic->page);
	}
	if (panic->vaddr) {
		u32 *pix = panic->vaddr + offset;

		*pix = color;
	}
}

struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
{
	struct xe_framebuffer *xe_fb;

	xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
	if (xe_fb)
		return &xe_fb->base;
	return NULL;
}

int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *panic = to_xe_panic_data(fb);

	/* No page is mapped yet; pages are mapped lazily in xe_panic_page_set_pixel() */
	panic->page = -1;
	sb->set_pixel = xe_panic_page_set_pixel;
	return 0;
}

void intel_bo_panic_finish(struct intel_framebuffer *fb)
{
	struct xe_panic_data *panic = to_xe_panic_data(fb);

	xe_panic_kunmap(panic);
	panic->page = -1;
}