Release 4.7 drivers/gpu/drm/i915/intel_ringbuffer.h
  
  
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
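For reference, the arithmetic above gives CACHELINE_DWORDS = 64 / 4 = 16, i.e. sixteen 32-bit entries per cacheline; intel_ring_cacheline_align(), declared later in this header, pads command emission to that granularity.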
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
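The quoted rule is why the driver never lets head and tail meet. A minimal sketch of the free-space computation this margin feeds into, mirroring __intel_ring_space() declared later in this header (illustrative, not the driver's exact body):

static inline int example_ring_space(int head, int tail, int size)
{
	int space = head - tail;	/* bytes between tail and head */

	if (space <= 0)
		space += size;		/* tail has wrapped past head */

	return space - I915_RING_FREE_SPACE; /* keep head and tail apart */
}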
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qword-aligned offsets, simply pretend it's
 * 8 bytes.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
#define GEN8_RING_SEMAPHORE_INIT(e) do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
	(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
	(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
	(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
	(e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
	(e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)
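With gen8_semaphore_seqno_size fixed at 8 and I915_NUM_ENGINES at 5, GEN8_SEMAPHORE_OFFSET() gives each engine a qword-aligned row of five slots in the semaphore page. Two worked values, straight from the macro:

/* GEN8_SEMAPHORE_OFFSET(__from = 0, __to = 1) = (0 * 5 + 1) * 8 = 0x08
 * GEN8_SEMAPHORE_OFFSET(__from = 1, __to = 0) = (1 * 5 + 0) * 8 = 0x28 */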
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};
#define HANGCHECK_SCORE_RING_HUNG 31
struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	unsigned user_interrupts;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
	u32 instdone[I915_NUM_INSTDONE_REG];
};
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;
	struct i915_vma *vma;

	struct intel_engine_cs *engine;
	struct list_head link;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	int reserved_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
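A hedged sketch of how last_retired_head is typically consumed when recomputing space (illustrative only; the real logic lives in intel_ring_update_space(), declared later in this header):

static inline void example_update_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != (u32)-1) {
		/* The GPU has finished up to here; reclaim the space. */
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;	/* mark as consumed */
	}

	ringbuf->space = __intel_ring_space(ringbuf->head,
					    ringbuf->tail, ringbuf->size);
}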
struct	intel_context;
struct drm_i915_reg_table;
/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful if we
 *    want to have multiple batches at different offsets based on some
 *    criteria. Not a requirement at the moment, but it provides an
 *    option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;

	struct drm_i915_gem_object *obj;
};
struct intel_engine_cs {
	const char	*name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	unsigned int hw_id;
	unsigned int guc_id; /* XXX same as hw_id? */
	u32		mmio_base;
	struct		drm_device *dev;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init_hw)(struct intel_engine_cs *ring);

	int		(*init_context)(struct drm_i915_gem_request *req);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct drm_i915_gem_request *req);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
	u32		(*get_seqno)(struct intel_engine_cs *ring);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
					       u64 offset, u32 length,
					       unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void		(*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *        signal to     signal to    signal to   signal to      signal to
	 *          RCS            VCS          BCS        VECS          VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *       sync from      sync from    sync from    sync from     sync from
	 *          RCS            VCS          BCS        VECS          VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32	sync_seqno[I915_NUM_ENGINES-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_ENGINES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[I915_NUM_ENGINES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *to_req,
				   struct intel_engine_cs *from,
				   u32 seqno);
		int	(*signal)(struct drm_i915_gem_request *signaller_req,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	unsigned int fw_domains;
	unsigned int next_context_status_buffer;
	unsigned int idle_lite_restore_wa;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	u32		irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int		(*emit_request)(struct drm_i915_gem_request *request);
	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;
	unsigned user_interrupts;

	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
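A hedged sketch of how the seqno hooks above combine for a caller that needs a coherent value (hypothetical helper; engines that are already coherent simply leave irq_seqno_barrier NULL):

static inline u32 example_coherent_seqno(struct intel_engine_cs *engine)
{
	/* The expensive kick described above; optional per engine. */
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);
	return engine->get_seqno(engine);
}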
static inline bool
intel_engine_initialized(struct intel_engine_cs *engine)
{
	return engine->dev != NULL;
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| dave gordon | 10 | 52.63% | 1 | 20.00% |
| chris wilson | 5 | 26.32% | 1 | 20.00% |
| tvrtko ursulin | 3 | 15.79% | 2 | 40.00% |
| oscar mateo | 1 | 5.26% | 1 | 20.00% |
| Total | 19 | 100.00% | 5 | 100.00% |
static inline unsigned
intel_engine_flag(struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| daniel vetter | 13 | 68.42% | 1 | 20.00% |
| tvrtko ursulin | 3 | 15.79% | 2 | 40.00% |
| chris wilson | 2 | 10.53% | 1 | 20.00% |
| oscar mateo | 1 | 5.26% | 1 | 20.00% |
| Total | 19 | 100.00% | 5 | 100.00% |
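Since intel_engine_flag() is a one-hot mask over the engine ids, sets of engines are plain bitmasks. The values follow directly from the enum in struct intel_engine_cs:

/* intel_engine_flag(rcs) == 1 << RCS == 0x01
 * intel_engine_flag(vcs) == 1 << VCS == 0x04
 * so "RCS and VCS" is simply 0x01 | 0x04 == 0x05. */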
static inline u32
intel_ring_sync_index(struct intel_engine_cs *engine,
		      struct intel_engine_cs *other)
{
	int idx;
	/*
	 * rcs -> 0 = bcs, 1 = vcs, 2 = vcs2, 3 = vecs;
	 * bcs -> 0 = vcs, 1 = vcs2, 2 = vecs, 3 = rcs;
	 * vcs -> 0 = vcs2, 1 = vecs, 2 = rcs, 3 = bcs;
	 * vcs2 -> 0 = vecs, 1 = rcs, 2 = bcs, 3 = vcs;
	 * vecs -> 0 = rcs, 1 = bcs, 2 = vcs, 3 = vcs2;
	 */
	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;
	return idx;
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| chris wilson | 35 | 79.55% | 1 | 14.29% |
| tvrtko ursulin | 3 | 6.82% | 2 | 28.57% |
| oscar mateo | 2 | 4.55% | 1 | 14.29% |
| daniel vetter | 2 | 4.55% | 1 | 14.29% |
| rodrigo vivi | 1 | 2.27% | 1 | 14.29% |
| zou nan hai | 1 | 2.27% | 1 | 14.29% |
| Total | 44 | 100.00% | 7 | 100.00% |
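A worked example of the wrap-around above, using ids from the enum in struct intel_engine_cs (the pointer subtraction assumes the engines sit in one array indexed by id):

/* engine = vcs (id 2), other = rcs (id 0):
 *   idx = (0 - 2) - 1 = -3;  -3 + I915_NUM_ENGINES = 2
 * so RCS occupies slot 2 of the VCS engine's semaphore.sync_seqno[]. */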
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();	/* make prior writes globally visible before the flush */
	clflush(&engine->status_page.page_addr[reg]);
	mb();	/* order the cacheline flush before later accesses */
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| imre deak | 23 | 67.65% | 1 | 25.00% |
| chris wilson | 9 | 26.47% | 2 | 50.00% |
| tvrtko ursulin | 2 | 5.88% | 1 | 25.00% |
| Total | 34 | 100.00% | 4 | 100.00% |
static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| zou nan hai | 16 | 55.17% | 1 | 12.50% |
| chris wilson | 5 | 17.24% | 3 | 37.50% |
| daniel vetter | 3 | 10.34% | 1 | 12.50% |
| tvrtko ursulin | 2 | 6.90% | 1 | 12.50% |
| imre deak | 2 | 6.90% | 1 | 12.50% |
| oscar mateo | 1 | 3.45% | 1 | 12.50% |
| Total | 29 | 100.00% | 8 | 100.00% |
static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| mika kuoppala | 26 | 89.66% | 1 | 33.33% |
| tvrtko ursulin | 2 | 6.90% | 1 | 33.33% |
| oscar mateo | 1 | 3.45% | 1 | 33.33% |
| Total | 29 | 100.00% | 3 | 100.00% |
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
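For example, the per-engine breadcrumb lives at I915_GEM_HWS_INDEX and is read back through the accessor defined earlier; a minimal sketch (hypothetical helper name):

static inline u32 example_hws_seqno(struct intel_engine_cs *engine)
{
	/* dword 0x30 of the status page, reserved for the driver above */
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}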
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *engine,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| chris wilson | 18 | 41.86% | 2 | 28.57% |
| oscar mateo | 15 | 34.88% | 3 | 42.86% |
| zou nan hai | 8 | 18.60% | 1 | 14.29% |
| tvrtko ursulin | 2 | 4.65% | 1 | 14.29% |
| Total | 43 | 100.00% | 7 | 100.00% |
static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
				       i915_reg_t reg)
{
	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| ville syrjala | 23 | 92.00% | 2 | 66.67% |
| tvrtko ursulin | 2 | 8.00% | 1 | 33.33% |
| Total | 25 | 100.00% | 3 | 100.00% |
static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| chris wilson | 13 | 41.94% | 2 | 28.57% |
| oscar mateo | 12 | 38.71% | 3 | 42.86% |
| zou nan hai | 4 | 12.90% | 1 | 14.29% |
| tvrtko ursulin | 2 | 6.45% | 1 | 14.29% |
| Total | 31 | 100.00% | 7 | 100.00% |
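Taken together, intel_ring_begin(), intel_ring_emit() and intel_ring_advance() form the usual emission pattern. A hedged sketch of a caller (MI_NOOPs chosen purely for illustration; the req->engine field name is assumed from this kernel's request structure):

static int example_emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);		/* reserve two dwords */
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_NOOP);	/* write dword, bump tail */
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);		/* mask tail to the ring size */
	return 0;
}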
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
bool intel_engine_stopped(struct intel_engine_cs *engine);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
void intel_fini_pipe_control(struct intel_engine_cs *engine);
int intel_init_pipe_control(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
int init_workarounds_ring(struct intel_engine_cs *engine);
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}
Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| chris wilson | 13 | 76.47% | 1 | 33.33% |
| oscar mateo | 4 | 23.53% | 2 | 66.67% |
| Total | 17 | 100.00% | 3 | 100.00% |
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too little
 * as that allows for corner cases that might have been missed. So the figure
 * has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST	160
/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);
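A hedged sketch of the life cycle the four reservation calls describe (hypothetical sequence; request creation drives this automatically):

/*
 *   intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 *   ... caller emits its own commands; the reserve is held back ...
 *   intel_ring_reserved_space_use(ringbuf);    <- i915_add_request() begins
 *   ... emit the request's final/breadcrumb commands ...
 *   intel_ring_reserved_space_end(ringbuf);    <- i915_add_request() done
 *
 * or, if the request is abandoned before submission:
 *   intel_ring_reserved_space_cancel(ringbuf);
 */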
#endif /* _INTEL_RINGBUFFER_H_ */
Overall Contributors

| Person | Tokens | Tokens % | Commits | Commits % |
|---|---|---|---|---|
| chris wilson | 289 | 19.81% | 43 | 27.92% |
| zou nan hai | 201 | 13.78% | 2 | 1.30% |
| oscar mateo | 187 | 12.82% | 13 | 8.44% |
| ben widawsky | 120 | 8.22% | 11 | 7.14% |
| daniel vetter | 105 | 7.20% | 14 | 9.09% |
| john harrison | 102 | 6.99% | 18 | 11.69% |
| mika kuoppala | 87 | 5.96% | 9 | 5.84% |
| tvrtko ursulin | 81 | 5.55% | 11 | 7.14% |
| arun siluvery | 42 | 2.88% | 2 | 1.30% |
| ville syrjala | 37 | 2.54% | 4 | 2.60% |
| brad volkin | 35 | 2.40% | 2 | 1.30% |
| thomas daniel | 28 | 1.92% | 4 | 2.60% |
| imre deak | 25 | 1.71% | 1 | 0.65% |
| haihao xiang | 20 | 1.37% | 2 | 1.30% |
| dave gordon | 19 | 1.30% | 2 | 1.30% |
| michel thierry | 16 | 1.10% | 2 | 1.30% |
| yakui zhao | 15 | 1.03% | 2 | 1.30% |
| nicholas hoath | 10 | 0.69% | 2 | 1.30% |
| jesse barnes | 7 | 0.48% | 1 | 0.65% |
| naresh kumar kachhi | 7 | 0.48% | 1 | 0.65% |
| jani nikula | 6 | 0.41% | 2 | 1.30% |
| abdiel janulgue | 4 | 0.27% | 1 | 0.65% |
| tomas elf | 4 | 0.27% | 1 | 0.65% |
| jordan justen | 4 | 0.27% | 1 | 0.65% |
| alex dai | 4 | 0.27% | 1 | 0.65% |
| francisco jerez | 3 | 0.21% | 1 | 0.65% |
| rodrigo vivi | 1 | 0.07% | 1 | 0.65% |
| Total | 1459 | 100.00% | 154 | 100.00% |
  