/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_H_
#define _XE_EXEC_QUEUE_H_

#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_file;
struct xe_device;
struct xe_file;

/* Iterate over all TLB invalidation types, primary GT through media GT */
#define for_each_tlb_inval(__i)	\
	for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \
	     __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i)
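
/*
 * Example (sketch): walk both TLB invalidation domains, e.g. to drop every
 * per-type last fence via the helpers declared later in this header:
 *
 *	unsigned int type;
 *
 *	for_each_tlb_inval(type)
 *		xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
 */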

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hw_engine, u32 flags,
					   u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						struct xe_vm *user_vm,
						u32 flags, u64 extensions);
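
/*
 * Example (sketch): create a per-tile bind queue for VM bind jobs. The zero
 * flags/extensions are illustrative defaults, and the ERR_PTR return
 * convention is assumed from common kernel practice, not stated here:
 *
 *	struct xe_exec_queue *q = xe_exec_queue_create_bind(xe, tile, vm, 0, 0);
 *
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	...
 *	xe_exec_queue_put(q);
 */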

void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);

/**
 * xe_exec_queue_get_unless_zero() - Try to take a reference to an exec queue
 * @q: The exec queue
 *
 * Return: @q with its reference count incremented, or NULL if the count was
 * already zero (the queue is being destroyed).
 */
static inline struct xe_exec_queue *
xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
{
	if (kref_get_unless_zero(&q->refcount))
		return q;

	return NULL;
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);

/* Take an additional reference; the caller must already hold one */
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
	kref_get(&q->refcount);
	return q;
}

/* Drop a reference; the final put frees the queue via xe_exec_queue_destroy() */
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
	kref_put(&q->refcount, xe_exec_queue_destroy);
}
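
/*
 * Example (sketch) of typical reference handling: xe_exec_queue_lookup()
 * below is assumed to return a referenced queue (common DRM lookup
 * semantics, not stated in this header), balanced with xe_exec_queue_put():
 *
 *	struct xe_exec_queue *q = xe_exec_queue_lookup(xef, id);
 *
 *	if (!q)
 *		return -ENOENT;
 *	... use q ...
 *	xe_exec_queue_put(q);
 */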

/* A parallel exec queue submits across multiple hardware engine instances */
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
{
	return q->width > 1;
}

/* True if the queue was created with a PXP (protected content) session type */
static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
{
	return q->pxp.type;
}

bool xe_exec_queue_is_lr(struct xe_exec_queue *q);

bool xe_exec_queue_is_idle(struct xe_exec_queue *q);

void xe_exec_queue_kill(struct xe_exec_queue *q);

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);

void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
					       struct xe_vm *vm);
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
							  struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
				  struct dma_fence *fence);
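
/*
 * Example (sketch): serialize against the last job on @e for @vm. Locking
 * requirements are an assumption here (these helpers likely expect the VM
 * lock held, hence the _unlocked variant above), and the returned fence is
 * assumed to carry a reference the caller must drop:
 *
 *	struct dma_fence *fence = xe_exec_queue_last_fence_get(e, vm);
 *
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */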

void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
						     unsigned int type);

struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
							 struct xe_vm *vm,
							 unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    struct dma_fence *fence,
					    unsigned int type);
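
/*
 * Example (sketch): wait on the last TLB invalidation fence of each type,
 * combining for_each_tlb_inval() with the getter above. As with the non-TLB
 * last-fence helpers, the locking rules and the assumption that a valid
 * (possibly stub) fence is always returned are not stated in this header:
 *
 *	unsigned int type;
 *	struct dma_fence *fence;
 *
 *	for_each_tlb_inval(type) {
 *		fence = xe_exec_queue_tlb_inval_last_fence_get(q, vm, type);
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */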

void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);

int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);

struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);

#endif