Release 4.11 drivers/gpu/drm/i915/intel_ringbuffer.c
/*
* Copyright © 2008-2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Zou Nan hai <nanhai.zou@intel.com>
* Xiang Hai hao <haihao.xiang@intel.com>
*
*/
#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
*/
#define LEGACY_REQUEST_SIZE 200
int __intel_ring_space(int head, int tail, int size)
{
int space = head - tail;
if (space <= 0)
space += size;
return space - I915_RING_FREE_SPACE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 32 | 88.89% | 2 | 50.00% |
Dave Gordon | 3 | 8.33% | 1 | 25.00% |
Oscar Mateo | 1 | 2.78% | 1 | 25.00% |
Total | 36 | 100.00% | 4 | 100.00% |
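A worked example may make the wrap-around arithmetic clearer. This is an illustrative sketch only, with made-up head/tail values; I915_RING_FREE_SPACE is the small reserve (64 bytes in this kernel) kept back so that head == tail always reads as an empty ring rather than a full one.
/* Illustrative only: a 4096-byte ring whose tail has wrapped past its head. */
int head = 512, tail = 3584, size = 4096;
int space = head - tail;        /* -3072: raw difference is negative */
if (space <= 0)
        space += size;          /* 1024 bytes physically free */
space -= I915_RING_FREE_SPACE;  /* 960 bytes usable after the reserve */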
void intel_ring_update_space(struct intel_ring *ring)
{
if (ring->last_retired_head != -1) {
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
}
ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
ring->tail, ring->size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dave Gordon | 29 | 50.88% | 1 | 12.50% |
Chris Wilson | 20 | 35.09% | 3 | 37.50% |
Oscar Mateo | 8 | 14.04% | 4 | 50.00% |
Total | 57 | 100.00% | 8 | 100.00% |
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
u32 cmd;
int ret;
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 71 | 87.65% | 6 | 66.67% |
John Harrison | 9 | 11.11% | 2 | 22.22% |
Zou Nan hai | 1 | 1.23% | 1 | 11.11% |
Total | 81 | 100.00% | 9 | 100.00% |
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
u32 cmd;
int ret;
/*
* read/write caches:
*
* I915_GEM_DOMAIN_RENDER is always invalidated, but is
* only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
* also flushed at 2d versus 3d pipeline switches.
*
* read-only caches:
*
* I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
* MI_READ_FLUSH is set, and is always flushed on 965.
*
* I915_GEM_DOMAIN_COMMAND may not exist?
*
* I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
* invalidated when MI_EXE_FLUSH is set.
*
* I915_GEM_DOMAIN_VERTEX, which exists on 965, is
* invalidated with every MI_FLUSH.
*
* TLBs:
*
* On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
* and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
* I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
* are flushed at any MI_FLUSH.
*/
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
if (IS_G4X(req->i915) || IS_GEN5(req->i915))
cmd |= MI_INVALIDATE_ISP;
}
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 57 | 54.81% | 12 | 70.59% |
Eric Anholt | 26 | 25.00% | 1 | 5.88% |
Zou Nan hai | 13 | 12.50% | 2 | 11.76% |
John Harrison | 8 | 7.69% | 2 | 11.76% |
Total | 104 | 100.00% | 17 | 100.00% |
/**
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
*
* [DevSNB-C+{W/A}] Before any depth stall flush (including those
* produced by non-pipelined state commands), software needs to first
* send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
* 0.
*
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
* =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
*
* And the workaround for these two requires this workaround first:
*
* [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
* BEFORE the pipe-control with a post-sync op and no write-cache
* flushes.
*
* And this last workaround is tricky because of the requirements on
* that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
* volume 2 part 1:
*
* "1 of the following must also be set:
* - Render Target Cache Flush Enable ([12] of DW1)
* - Depth Cache Flush Enable ([0] of DW1)
* - Stall at Pixel Scoreboard ([1] of DW1)
* - Depth Stall ([13] of DW1)
* - Post-Sync Operation ([13] of DW1)
* - Notify Enable ([8] of DW1)"
*
* The cache flushes require the workaround flush that triggered this
* one, so we can't use it. Depth stall would trigger the same.
* Post-sync nonzero is what triggered this second workaround, so we
* can't use that one either. Notify enable is IRQs, which aren't
* really our business. That leaves only stall at scoreboard.
*/
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, 0); /* low dword */
intel_ring_emit(ring, 0); /* high dword */
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Barnes | 145 | 79.67% | 1 | 10.00% |
Chris Wilson | 26 | 14.29% | 6 | 60.00% |
John Harrison | 10 | 5.49% | 2 | 20.00% |
Tvrtko A. Ursulin | 1 | 0.55% | 1 | 10.00% |
Total | 182 | 100.00% | 10 | 100.00% |
static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
u32 flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = intel_emit_post_sync_nonzero_flush(req);
if (ret)
return ret;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/*
* Ensure that any following seqno writes only happen
* when the render cache is indeed flushed.
*/
flags |= PIPE_CONTROL_CS_STALL;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Barnes | 93 | 51.67% | 2 | 11.11% |
Chris Wilson | 57 | 31.67% | 9 | 50.00% |
Paulo Zanoni | 14 | 7.78% | 1 | 5.56% |
John Harrison | 9 | 5.00% | 3 | 16.67% |
Ben Widawsky | 3 | 1.67% | 1 | 5.56% |
Daniel Vetter | 3 | 1.67% | 1 | 5.56% |
Tvrtko A. Ursulin | 1 | 0.56% | 1 | 5.56% |
Total | 180 | 100.00% | 18 | 100.00% |
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paulo Zanoni | 61 | 76.25% | 1 | 14.29% |
Chris Wilson | 10 | 12.50% | 4 | 57.14% |
John Harrison | 9 | 11.25% | 2 | 28.57% |
Total | 80 | 100.00% | 7 | 100.00% |
static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
u32 flags = 0;
int ret;
/*
* Ensure that any following seqno writes only happen when the render
* cache is indeed flushed.
*
* Workaround: 4th PIPE_CONTROL command (except the ones with only
* read-cache invalidate bits set) must have the CS_STALL bit set. We
* don't try to be clever and just set it unconditionally.
*/
flags |= PIPE_CONTROL_CS_STALL;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
gen7_render_ring_cs_stall_wa(req);
}
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paulo Zanoni | 128 | 68.45% | 2 | 11.11% |
Chris Wilson | 41 | 21.93% | 10 | 55.56% |
John Harrison | 9 | 4.81% | 3 | 16.67% |
Francisco Jerez | 4 | 2.14% | 1 | 5.56% |
Ville Syrjälä | 4 | 2.14% | 1 | 5.56% |
Tvrtko A. Ursulin | 1 | 0.53% | 1 | 5.56% |
Total | 187 | 100.00% | 18 | 100.00% |
static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kenneth Graunke | 77 | 78.57% | 1 | 14.29% |
Chris Wilson | 10 | 10.20% | 3 | 42.86% |
John Harrison | 9 | 9.18% | 2 | 28.57% |
Ben Widawsky | 2 | 2.04% | 1 | 14.29% |
Total | 98 | 100.00% | 7 | 100.00% |
static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
u32 flags = 0;
int ret;
flags |= PIPE_CONTROL_CS_STALL;
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
ret = gen8_emit_pipe_control(req,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD,
0);
if (ret)
return ret;
}
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 77 | 56.20% | 1 | 7.69% |
Kenneth Graunke | 26 | 18.98% | 2 | 15.38% |
Chris Wilson | 22 | 16.06% | 5 | 38.46% |
John Harrison | 6 | 4.38% | 2 | 15.38% |
Francisco Jerez | 4 | 2.92% | 1 | 7.69% |
Fengguang Wu | 1 | 0.73% | 1 | 7.69% |
Tvrtko A. Ursulin | 1 | 0.73% | 1 | 7.69% |
Total | 137 | 100.00% | 13 | 100.00% |
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_GEN(dev_priv) >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Vetter | 55 | 90.16% | 1 | 25.00% |
Chris Wilson | 3 | 4.92% | 1 | 25.00% |
Tvrtko A. Ursulin | 2 | 3.28% | 1 | 25.00% |
Oscar Mateo | 1 | 1.64% | 1 | 25.00% |
Total | 61 | 100.00% | 4 | 100.00% |
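On gen4+ the status page can live above 4GiB, so the helper above folds physical-address bits [35:32] into bits [7:4] of the value written to HWS_PGA. A hypothetical bus address illustrates the packing (the address itself is made up):
/* Illustrative only: a hypothetical 36-bit status page address. */
u64 busaddr = 0x234567000ULL;
u32 addr = busaddr;                     /* 0x34567000: low 32 bits, page-aligned */
addr |= (busaddr >> 28) & 0xf0;         /* 0x20: bits [35:32] land in bits [7:4] */
/* 0x34567020 is the value written to HWS_PGA */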
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
if (IS_GEN7(dev_priv)) {
switch (engine->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
/*
* VCS2 actually doesn't exist on Gen7. Only shut up
* gcc switch check warning
*/
case VCS2:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
case VECS:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(dev_priv)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(engine->mmio_base);
}
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
/*
* Flush the TLB for this page
*
* FIXME: These two bits have disappeared on gen8, so a question
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
if (IS_GEN(dev_priv, 6, 7)) {
i915_reg_t reg = RING_INSTPM(engine->mmio_base);
/* ring should be idle before issuing a sync flush */
WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (intel_wait_for_register(dev_priv,
reg, INSTPM_SYNC_FLUSH, 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
engine->name);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Damien Lespiau | 175 | 87.50% | 1 | 14.29% |
Tvrtko A. Ursulin | 13 | 6.50% | 2 | 28.57% |
Chris Wilson | 10 | 5.00% | 3 | 42.86% |
Ville Syrjälä | 2 | 1.00% | 1 | 14.29% |
Total | 200 | 100.00% | 7 | 100.00% |
static bool stop_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) > 2) {
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
if (intel_wait_for_register(dev_priv,
RING_MI_MODE(engine->mmio_base),
MODE_IDLE,
MODE_IDLE,
1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
engine->name);
/* Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
return false;
}
}
I915_WRITE_CTL(engine, 0);
I915_WRITE_HEAD(engine, 0);
I915_WRITE_TAIL(engine, 0);
if (INTEL_GEN(dev_priv) > 2) {
(void)I915_READ_CTL(engine);
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 83 | 55.33% | 6 | 35.29% |
Naresh Kumar Kachhi | 28 | 18.67% | 2 | 11.76% |
Tvrtko A. Ursulin | 13 | 8.67% | 1 | 5.88% |
Zou Nan hai | 8 | 5.33% | 1 | 5.88% |
Daniel Vetter | 8 | 5.33% | 3 | 17.65% |
Eric Anholt | 7 | 4.67% | 1 | 5.88% |
Oscar Mateo | 1 | 0.67% | 1 | 5.88% |
Xiang, Haihao | 1 | 0.67% | 1 | 5.88% |
Jani Nikula | 1 | 0.67% | 1 | 5.88% |
Total | 150 | 100.00% | 17 | 100.00% |
static int init_ring_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->buffer;
int ret = 0;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_KMS("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
I915_READ_CTL(engine),
I915_READ_HEAD(engine),
I915_READ_TAIL(engine),
I915_READ_START(engine));
if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
I915_READ_CTL(engine),
I915_READ_HEAD(engine),
I915_READ_TAIL(engine),
I915_READ_START(engine));
ret = -EIO;
goto out;
}
}
if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
else
intel_ring_setup_status_page(engine);
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(engine))
DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
engine->name, I915_READ_HEAD(engine));
intel_ring_update_space(ring);
I915_WRITE_HEAD(engine, ring->head);
I915_WRITE_TAIL(engine, ring->tail);
(void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
I915_READ_CTL(engine),
I915_READ_CTL(engine) & RING_VALID,
I915_READ_HEAD(engine), ring->head,
I915_READ_TAIL(engine), ring->tail,
I915_READ_START(engine),
i915_ggtt_offset(ring->vma));
ret = -EIO;
goto out;
}
intel_engine_init_hangcheck(engine);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 173 | 51.03% | 13 | 41.94% |
Zou Nan hai | 46 | 13.57% | 1 | 3.23% |
Eric Anholt | 36 | 10.62% | 1 | 3.23% |
Tvrtko A. Ursulin | 32 | 9.44% | 1 | 3.23% |
Daniel Vetter | 32 | 9.44% | 6 | 19.35% |
Jiri Kosina | 5 | 1.47% | 1 | 3.23% |
Oscar Mateo | 4 | 1.18% | 3 | 9.68% |
Sean Paul | 3 | 0.88% | 1 | 3.23% |
Carlos Santa | 3 | 0.88% | 1 | 3.23% |
Mika Kuoppala | 2 | 0.59% | 1 | 3.23% |
Deepak S | 2 | 0.59% | 1 | 3.23% |
Tomas Elf | 1 | 0.29% | 1 | 3.23% |
Total | 339 | 100.00% | 31 | 100.00% |
static void reset_ring_common(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
/* Try to restore the logical GPU state to match the continuation
* of the request queue. If we skip the context/PD restore, then
* the next request may try to execute assuming that its context
* is valid and loaded on the GPU and so may try to access invalid
* memory, prompting repeated GPU hangs.
*
* If the request was guilty, we still restore the logical state
* in case the next request requires it (e.g. the aliasing ppgtt),
* but skip over the hung batch.
*
* If the request was innocent, we try to replay the request with
* the restored context.
*/
if (request) {
struct drm_i915_private *dev_priv = request->i915;
struct intel_context *ce = &request->ctx->engine[engine->id];
struct i915_hw_ppgtt *ppgtt;
/* FIXME consider gen8 reset */
if (ce->state) {
I915_WRITE(CCID,
i915_ggtt_offset(ce->state) |
BIT(8) /* must be set! */ |
CCID_EXTENDED_STATE_SAVE |
CCID_EXTENDED_STATE_RESTORE |
CCID_EN);
}
ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
if (ppgtt) {
u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
/* Wait for the PD reload to complete */
if (intel_wait_for_register(dev_priv,
RING_PP_DIR_BASE(engine),
BIT(0), 0,
10))
DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
/* If the rq hung, jump to its breadcrumb and skip the batch */
if (request->fence.error == -EIO) {
struct intel_ring *ring = request->ring;
ring->head = request->postfix;
ring->last_retired_head = -1;
}
} else {
engine->legacy_active_context = NULL;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 227 | 100.00% | 2 | 100.00% |
Total | 227 | 100.00% | 2 | 100.00% |
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
int ret, i;
if (w->count == 0)
return 0;
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
ret = intel_ring_begin(req, (w->count * 2 + 2));
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_ring_emit_reg(ring, w->reg[i].addr);
intel_ring_emit(ring, w->reg[i].value);
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mika Kuoppala | 105 | 54.40% | 1 | 6.25% |
Arun Siluvery | 43 | 22.28% | 3 | 18.75% |
Chris Wilson | 30 | 15.54% | 6 | 37.50% |
John Harrison | 11 | 5.70% | 3 | 18.75% |
Tvrtko A. Ursulin | 2 | 1.04% | 1 | 6.25% |
Michel Thierry | 1 | 0.52% | 1 | 6.25% |
Ville Syrjälä | 1 | 0.52% | 1 | 6.25% |
Total | 193 | 100.00% | 16 | 100.00% |
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
int ret;
ret = intel_ring_workarounds_emit(req);
if (ret != 0)
return ret;
ret = i915_gem_render_state_emit(req);
if (ret)
return ret;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Vetter | 39 | 82.98% | 1 | 25.00% |
John Harrison | 4 | 8.51% | 1 | 25.00% |
Chris Wilson | 4 | 8.51% | 2 | 50.00% |
Total | 47 | 100.00% | 4 | 100.00% |
static int wa_add(struct drm_i915_private *dev_priv,
i915_reg_t addr,
const u32 mask, const u32 val)
{
const u32 idx = dev_priv->workarounds.count;
if (WARN_ON(idx >= I915_MAX_WA_REGS))
return -ENOSPC;
dev_priv->workarounds.reg[idx].addr = addr;
dev_priv->workarounds.reg[idx].value = val;
dev_priv->workarounds.reg[idx].mask = mask;
dev_priv->workarounds.count++;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mika Kuoppala | 62 | 65.96% | 1 | 20.00% |
Arun Siluvery | 29 | 30.85% | 2 | 40.00% |
Damien Lespiau | 2 | 2.13% | 1 | 20.00% |
Ville Syrjälä | 1 | 1.06% | 1 | 20.00% |
Total | 94 | 100.00% | 5 | 100.00% |
#define WA_REG(addr, mask, val) do { \
const int r = wa_add(dev_priv, (addr), (mask), (val)); \
if (r) \
return r; \
} while (0)
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
#define WA_CLR_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, mask, _MASKED_FIELD(mask, value))
#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
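/*
 * Illustrative expansion only (not additional driver code): the WA_* macros
 * merely record register/mask/value triples in dev_priv->workarounds; the
 * list is replayed with MI_LOAD_REGISTER_IMM by intel_ring_workarounds_emit()
 * above. For example, WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE)
 * boils down to roughly:
 *
 *	ret = wa_add(dev_priv, GEN8_ROW_CHICKEN,
 *		     STALL_DOP_GATING_DISABLE,
 *		     _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
 *	if (ret)
 *		return ret;
 *
 * where _MASKED_BIT_ENABLE(a) replicates the bit into the upper 16 bits
 * ((a) << 16 | (a)), since masked registers take the enable mask and the
 * value in a single write.
 */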
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
struct drm_i915_private *dev_priv = engine->i915;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[engine->id];
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
i915_mmio_reg_offset(reg));
wa->hw_whitelist_count[engine->id]++;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Siluvery | 83 | 93.26% | 1 | 33.33% |
Tvrtko A. Ursulin | 5 | 5.62% | 1 | 33.33% |
Chris Wilson | 1 | 1.12% | 1 | 33.33% |
Total | 89 | 100.00% | 3 | 100.00% |
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
HDC_FORCE_NON_COHERENT);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
* polygons in the same 8x4 pixel/sample area to be processed without
* stalling waiting for the earlier ones to write to Hierarchical Z
* buffer."
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw,chv */
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Siluvery | 81 | 96.43% | 9 | 81.82% |
Chris Wilson | 2 | 2.38% | 1 | 9.09% |
Tvrtko A. Ursulin | 1 | 1.19% | 1 | 9.09% |
Total | 84 | 100.00% | 11 | 100.00% |
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_workarounds(engine);
if (ret)
return ret;
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
WA_SET_BIT_MASKED(HDC_CHICKEN0,
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
(IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Siluvery | 55 | 66.27% | 3 | 30.00% |
Rodrigo Vivi | 11 | 13.25% | 2 | 20.00% |
Chris Wilson | 6 | 7.23% | 1 | 10.00% |
Mika Kuoppala | 4 | 4.82% | 1 | 10.00% |
Damien Lespiau | 4 | 4.82% | 1 | 10.00% |
Tvrtko A. Ursulin | 2 | 2.41% | 1 | 10.00% |
Ville Syrjälä | 1 | 1.20% | 1 | 10.00% |
Total | 83 | 100.00% | 10 | 100.00% |
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_workarounds(engine);
if (ret)
return ret;
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ville Syrjälä | 21 | 37.50% | 2 | 22.22% |
Arun Siluvery | 13 | 23.21% | 1 | 11.11% |
Chris Wilson | 8 | 14.29% | 3 | 33.33% |
Kenneth Graunke | 7 | 12.50% | 1 | 11.11% |
Mika Kuoppala | 5 | 8.93% | 1 | 11.11% |
Tvrtko A. Ursulin | 2 | 3.57% | 1 | 11.11% |
Total | 56 | 100.00% | 9 | 100.00% |
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
/* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
* WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
* but we do that in per ctx batchbuffer as there is an issue
* with this register not getting restored on ctx restore
*/
}
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
/* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
* both tied to WaForceContextSaveRestoreNonCoherent
* in some hsds for skl. We keep the tie for all gen9. The
* documentation is a bit hazy and so we want to get common behaviour,
* even though there is no clear evidence we would need both on kbl/bxt.
* This area has been source of system hangs so we play it safe
* and mimic the skl regardless of what bspec says.
*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
* a TLB invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT);
/* WaDisableHDCInvalidation:skl,bxt,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
if (IS_SKYLAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Siluvery | 85 | 27.78% | 7 | 20.00% |
Mika Kuoppala | 69 | 22.55% | 4 | 11.43% |
Nick Hoath | 53 | 17.32% | 7 | 20.00% |
Jani Nikula | 26 | 8.50% | 4 | 11.43% |
Damien Lespiau | 25 | 8.17% | 4 | 11.43% |
Tim Gore | 14 | 4.58% | 3 | 8.57% |
Ben Widawsky | 10 | 3.27% | 1 | 2.86% |
Robert Beckett | 7 | 2.29% | 1 | 2.86% |
Chris Wilson | 7 | 2.29% | 1 | 2.86% |
Imre Deak | 7 | 2.29% | 2 | 5.71% |
Tvrtko A. Ursulin | 3 | 0.98% | 1 | 2.86% |
Total | 306 | 100.00% | 35 | 100.00% |
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
for (i = 0; i < 3; i++) {
u8 ss;
/*
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
continue;
/*
* subslice_7eu[i] != 0 (because of the check above) and
* ss_max == 4 (maximum number of subslices possible per slice)
*
* -> 0 <= ss <= 3;
*/
ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
return 0;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Damien Lespiau | 170 | 91.89% | 1 | 20.00% |
Imre Deak | 8 | 4.32% | 1 | 20.00% |
Chris Wilson | 3 | 1.62% | 1 | 20.00% |
Zeng Zhaoxiu | 2 | 1.08% | 1 | 20.00% |
Tvrtko A. Ursulin | 2 | 1.08% | 1 | 20.00% |
Total | 185 | 100.00% | 5 | 100.00% |
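The hashing arithmetic above is easier to follow with a concrete (made-up) fuse value:
/* Illustrative only: slice 0 reports its single 7-EU subslice at index 2. */
u8 mask = 0x4;          /* sseu.subslice_7eu[0] == 0b0100 */
/* is_power_of_2(0x4) holds, so the slice is considered */
u8 ss = ffs(mask) - 1;  /* ffs(0x4) == 3, so ss == 2 */
u8 val = 3 - ss;        /* vals[0] == 1, programmed via GEN9_IZ_HASHING(0, vals[0]) */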
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/*
* Actual WA is to disable percontext preemption granularity control
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
/* WaEnableGapsTsvCreditFix:skl */
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
return skl_tune_iz_hashing(engine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mika Kuoppala | 45 | 39.82% | 4 | 33.33% |
Arun Siluvery | 36 | 31.86% | 3 | 25.00% |
Damien Lespiau | 23 | 20.35% | 3 | 25.00% |
Chris Wilson | 5 | 4.42% | 1 | 8.33% |
Tvrtko A. Ursulin | 4 | 3.54% | 1 | 8.33% |
Total | 113 | 100.00% | 12 | 100.00% |
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
/* WaDisablePooledEuLoadBalancingFix:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
/* WaDisableSbeCacheDispatchPortSharing:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
}
/* WaProgramL3SqcReg1DefaultForPerf:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
L3_HIGH_PRIO_CREDITS(2));
/* WaToEnableHwFixForPushConstHWBug:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaInPlaceDecompressionHang:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arun Siluvery | 76 | 29.46% | 4 | 23.53% |
Mika Kuoppala | 75 | 29.07% | 3 | 17.65% |
Nick Hoath | 47 | 18.22% | 3 | 17.65% |
Tim Gore | 19 | 7.36% | 2 | 11.76% |
Jani Nikula | 15 | 5.81% | 1 | 5.88% |
Chris Wilson | 10 | 3.88% | 1 | 5.88% |
Imre Deak | 9 | 3.49% | 1 | 5.88% |
Tvrtko A. Ursulin | 4 | 1.55% | 1 | 5.88% |
Matthew Auld | 3 | 1.16% | 1 | 5.88% |
Total | 258 | 100.00% | 17 | 100.00% |
static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/* WaEnableGapsTsvCreditFix:kbl */
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
WA_SET_BIT(GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE);
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableGafsUnitClkGating:kbl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaDisableLSQCROPERFforOCL:kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mika Kuoppala | 150 | 98.04% | 9 | 90.00% |
Matthew Auld | 3 | 1.96% | 1 | 10.00% |
Total | 153 | 100.00% | 10 | 100.00% |
int init_workarounds_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
if (IS_BROADWELL(dev_priv))
return bdw_init_workarounds(engine);
if (IS_CHERRYVIEW(dev_priv))
return chv_init_workarounds(engine);
if (IS_SKYLAKE(dev_priv))
return skl_init_workarounds(engine);
if (IS_BROXTON(dev_priv))
return bxt_init_workarounds(engine);
if (IS_KABYLAKE(dev_priv))
return kbl_init_workarounds(engine);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mika Kuoppala | 66 | 57.39% | 2 | 20.00% |
Chris Wilson | 12 | 10.43% | 2 | 20.00% |
Damien Lespiau | 11 | 9.57% | 1 | 10.00% |
Arun Siluvery | 11 | 9.57% | 1 | 10.00% |
Nick Hoath | 8 | 6.96% | 2 | 20.00% |
Tvrtko A. Ursulin | 6 | 5.22% | 1 | 10.00% |
Ville Syrjälä | 1 | 0.87% | 1 | 10.00% |
Total | 115 | 100.00% | 10 | 100.00% |
static int init_render_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret = init_ring_common(engine);
if (ret)
return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
if (IS_GEN6(dev_priv))
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
if (IS_GEN7(dev_priv))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (IS_GEN6(dev_priv)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
* policy is not supported."
*/
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_INFO(dev_priv)->gen >= 6)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
return init_workarounds_ring(engine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 62 | 32.80% | 10 | 35.71% |
Daniel Vetter | 27 | 14.29% | 2 | 7.14% |
Jesse Barnes | 18 | 9.52% | 1 | 3.57% |
Tvrtko A. Ursulin | 16 | 8.47% | 2 | 7.14% |
Ben Widawsky | 16 | 8.47% | 2 | 7.14% |
Eric Anholt | 13 | 6.88% | 1 | 3.57% |
Ville Syrjälä | 13 | 6.88% | 3 | 10.71% |
Konrad Zapalowicz | 7 | 3.70% | 1 | 3.57% |
Akash Goel | 6 | 3.17% | 2 | 7.14% |
Zou Nan hai | 6 | 3.17% | 1 | 3.57% |
Mika Kuoppala | 3 | 1.59% | 1 | 3.57% |
Imre Deak | 1 | 0.53% | 1 | 3.57% |
Oscar Mateo | 1 | 0.53% | 1 | 3.57% |
Total | 189 | 100.00% | 28 | 100.00% |
static void render_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 13 | 46.43% | 4 | 50.00% |
Ben Widawsky | 12 | 42.86% | 1 | 12.50% |
Daniel Vetter | 1 | 3.57% | 1 | 12.50% |
Tvrtko A. Ursulin | 1 | 3.57% | 1 | 12.50% |
Oscar Mateo | 1 | 3.57% | 1 | 12.50% |
Total | 28 | 100.00% | 8 | 100.00% |
static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
*out++ = GFX_OP_PIPE_CONTROL(6);
*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
*out++ = lower_32_bits(gtt_offset);
*out++ = upper_32_bits(gtt_offset);
*out++ = req->global_seqno;
*out++ = 0;
*out++ = (MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
*out++ = 0;
}
return out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 80 | 56.34% | 1 | 8.33% |
Chris Wilson | 54 | 38.03% | 8 | 66.67% |
Dave Gordon | 6 | 4.23% | 1 | 8.33% |
John Harrison | 1 | 0.70% | 1 | 8.33% |
Akash Goel | 1 | 0.70% | 1 | 8.33% |
Total | 142 | 100.00% | 12 | 100.00% |
static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
*out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
*out++ = upper_32_bits(gtt_offset);
*out++ = req->global_seqno;
*out++ = (MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
*out++ = 0;
}
return out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 66 | 51.16% | 1 | 6.25% |
Chris Wilson | 48 | 37.21% | 10 | 62.50% |
Daniel Vetter | 7 | 5.43% | 2 | 12.50% |
Dave Gordon | 6 | 4.65% | 1 | 6.25% |
Akash Goel | 1 | 0.78% | 1 | 6.25% |
John Harrison | 1 | 0.78% | 1 | 6.25% |
Total | 129 | 100.00% | 16 | 100.00% |
static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int num_rings = 0;
for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
continue;
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
*out++ = MI_LOAD_REGISTER_IMM(1);
*out++ = i915_mmio_reg_offset(mbox_reg);
*out++ = req->global_seqno;
num_rings++;
}
}
if (num_rings & 1)
*out++ = MI_NOOP;
return out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 52 | 37.96% | 9 | 47.37% |
Ben Widawsky | 41 | 29.93% | 3 | 15.79% |
Tvrtko A. Ursulin | 25 | 18.25% | 1 | 5.26% |
Akash Goel | 6 | 4.38% | 1 | 5.26% |
Ville Syrjälä | 4 | 2.92% | 1 | 5.26% |
Zou Nan hai | 4 | 2.92% | 1 | 5.26% |
John Harrison | 3 | 2.19% | 1 | 5.26% |
Oscar Mateo | 1 | 0.73% | 1 | 5.26% |
Dave Gordon | 1 | 0.73% | 1 | 5.26% |
Total | 137 | 100.00% | 19 | 100.00% |
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
i915_gem_request_submit(request);
I915_WRITE_TAIL(request->engine, request->tail);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 29 | 80.56% | 3 | 60.00% |
John Harrison | 6 | 16.67% | 1 | 20.00% |
Zhenyu Wang | 1 | 2.78% | 1 | 20.00% |
Total | 36 | 100.00% | 5 | 100.00% |
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
u32 *out)
{
*out++ = MI_STORE_DWORD_INDEX;
*out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
*out++ = req->global_seqno;
*out++ = MI_USER_INTERRUPT;
req->tail = intel_ring_offset(req->ring, out);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 52 | 92.86% | 7 | 70.00% |
Zhenyu Wang | 2 | 3.57% | 1 | 10.00% |
Ben Widawsky | 1 | 1.79% | 1 | 10.00% |
John Harrison | 1 | 1.79% | 1 | 10.00% |
Total | 56 | 100.00% | 10 | 100.00% |
static const int i9xx_emit_breadcrumb_sz = 4;
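/*
 * Note (illustrative, not new driver code): i9xx_emit_breadcrumb_sz records
 * how many dwords the callback above writes -- the MI_STORE_DWORD_INDEX
 * header, the HWS offset, the seqno and MI_USER_INTERRUPT make four -- so
 * that callers can reserve exactly that much ring space before invoking it.
 * The two are assigned together in the engine setup code, conceptually:
 *
 *	engine->emit_breadcrumb    = i9xx_emit_breadcrumb;
 *	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
 *
 * and a mismatch would let the emitter run past its reservation.
 */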
/**
* gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
*
* @req - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
u32 *out)
{
return i9xx_emit_breadcrumb(req,
req->engine->semaphore.signal(req, out));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 34 | 100.00% | 3 | 100.00% |
Total | 34 | 100.00% | 3 | 100.00% |
static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
u32 *out)
{
struct intel_engine_cs *engine = req->engine;
if (engine->semaphore.signal)
out = engine->semaphore.signal(req, out);
*out++ = GFX_OP_PIPE_CONTROL(6);
*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE);
*out++ = intel_hws_seqno_address(engine);
*out++ = 0;
*out++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
*out++ = 0;
*out++ = MI_USER_INTERRUPT;
*out++ = MI_NOOP;
req->tail = intel_ring_offset(req->ring, out);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 121 | 100.00% | 6 | 100.00% |
Total | 121 | 100.00% | 6 | 100.00% |
static const int gen8_render_emit_breadcrumb_sz = 8;
/**
* gen8_ring_sync_to - sync the waiting request to the signalling request
*
* @req - request that is waiting
* @signal - request which has, or will, signal
*/
static int
gen8_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
struct i915_hw_ppgtt *ppgtt;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring,
MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_SAD_GTE_SDD);
intel_ring_emit(ring, signal->global_seqno);
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_advance(ring);
/* When the !RCS engines idle waiting upon a semaphore, they lose their
* pagetables and we must reload them before executing the batch.
* We do this on the i915_switch_context() following the wait and
* before the dispatch.
*/
ppgtt = req->ctx->ppgtt;
if (ppgtt && req->engine->id != RCS)
ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 73 | 47.40% | 2 | 15.38% |
Chris Wilson | 63 | 40.91% | 9 | 69.23% |
Tvrtko A. Ursulin | 12 | 7.79% | 1 | 7.69% |
John Harrison | 6 | 3.90% | 1 | 7.69% |
Total | 154 | 100.00% | 13 | 100.00% |
static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
struct intel_ring *ring = req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
int ret;
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, dw1 | wait_mbox);
/* Throughout all of the GEM code, seqno passed implies our current
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
intel_ring_emit(ring, signal->global_seqno - 1);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 67 | 54.47% | 9 | 56.25% |
Ben Widawsky | 28 | 22.76% | 3 | 18.75% |
Zhenyu Wang | 13 | 10.57% | 1 | 6.25% |
Daniel Vetter | 8 | 6.50% | 1 | 6.25% |
John Harrison | 6 | 4.88% | 1 | 6.25% |
Tvrtko A. Ursulin | 1 | 0.81% | 1 | 6.25% |
Total | 123 | 100.00% | 16 | 100.00% |
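The off-by-one in gen6_ring_sync_to() follows directly from the strict comparison the MBOX hardware performs; rewriting the inequality shows the equivalence (purely illustrative):
/* hw_seqno > (signal->global_seqno - 1)  <=>  hw_seqno >= signal->global_seqno */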
static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
/* MI_STORE are internally buffered by the GPU and not flushed
* either by MI_FLUSH or SyncFlush or any other combination of
* MI commands.
*
* "Only the submission of the store operation is guaranteed.
* The write result will be complete (coherent) some time later
* (this is practically a finite period but there is no guaranteed
* latency)."
*
* Empirically, we observe that we need a delay of at least 75us to
* be sure that the seqno write is visible by the CPU.
*/
usleep_range(125, 250);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 18 | 94.74% | 2 | 66.67% |
Dave Gordon | 1 | 5.26% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page.
*
* Note that this effectively stalls the read by the time it takes to
* do a memory transaction, which more or less ensures that the write
* from the GPU has sufficient time to invalidate the CPU cacheline.
* Alternatively we could delay the interrupt from the CS ring to give
* the write time to land, but that would incur a delay after every
* batch i.e. much more frequent than a delay when waiting for the
* interrupt (with the same net latency).
*
* Also note that to prevent whole machine hangs on gen7, we have to
* take the spinlock to guard against concurrent cacheline access.
*/
spin_lock_irq(&dev_priv->uncore.lock);
POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
spin_unlock_irq(&dev_priv->uncore.lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 39 | 76.47% | 6 | 66.67% |
Daniel Vetter | 8 | 15.69% | 1 | 11.11% |
Tvrtko A. Ursulin | 3 | 5.88% | 1 | 11.11% |
Oscar Mateo | 1 | 1.96% | 1 | 11.11% |
Total | 51 | 100.00% | 9 | 100.00% |
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 7 | 31.82% | 3 | 30.00% |
Zou Nan hai | 5 | 22.73% | 1 | 10.00% |
Daniel Vetter | 5 | 22.73% | 3 | 30.00% |
Tvrtko A. Ursulin | 3 | 13.64% | 1 | 10.00% |
Oscar Mateo | 1 | 4.55% | 1 | 10.00% |
Paulo Zanoni | 1 | 4.55% | 1 | 10.00% |
Total | 22 | 100.00% | 10 | 100.00% |
static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Vetter | 5 | 22.73% | 1 | 11.11% |
Chris Wilson | 5 | 22.73% | 3 | 33.33% |
Eric Anholt | 5 | 22.73% | 1 | 11.11% |
Tvrtko A. Ursulin | 3 | 13.64% | 1 | 11.11% |
Zou Nan hai | 2 | 9.09% | 1 | 11.11% |
Paulo Zanoni | 1 | 4.55% | 1 | 11.11% |
Oscar Mateo | 1 | 4.55% | 1 | 11.11% |
Total | 22 | 100.00% | 9 | 100.00% |
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask &= ~engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Vetter | 33 | 68.75% | 3 | 30.00% |
Chris Wilson | 10 | 20.83% | 3 | 30.00% |
Tvrtko A. Ursulin | 2 | 4.17% | 1 | 10.00% |
Jani Nikula | 1 | 2.08% | 1 | 10.00% |
Zou Nan hai | 1 | 2.08% | 1 | 10.00% |
Oscar Mateo | 1 | 2.08% | 1 | 10.00% |
Total | 48 | 100.00% | 10 | 100.00% |
static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask |= engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Vetter | 28 | 75.68% | 3 | 30.00% |
Chris Wilson | 3 | 8.11% | 2 | 20.00% |
Tvrtko A. Ursulin | 2 | 5.41% | 1 | 10.00% |
Zou Nan hai | 1 | 2.70% | 1 | 10.00% |
Eric Anholt | 1 | 2.70% | 1 | 10.00% |
Oscar Mateo | 1 | 2.70% | 1 | 10.00% |
Jani Nikula | 1 | 2.70% | 1 | 10.00% |
Total | 37 | 100.00% | 10 | 100.00% |
static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask &= ~engine->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(RING_IMR(engine->mmio_base));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 44 | 91.67% | 3 | 50.00% |
Tvrtko A. Ursulin | 2 | 4.17% | 1 | 16.67% |
Oscar Mateo | 1 | 2.08% | 1 | 16.67% |
Jani Nikula | 1 | 2.08% | 1 | 16.67% |
Total | 48 | 100.00% | 6 | 100.00% |
static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask |= engine->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 33 | 89.19% | 3 | 50.00% |
Tvrtko A. Ursulin | 2 | 5.41% | 1 | 16.67% |
Oscar Mateo | 1 | 2.70% | 1 | 16.67% |
Jani Nikula | 1 | 2.70% | 1 | 16.67% |
Total | 37 | 100.00% | 6 | 100.00% |
static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_FLUSH);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 28 | 43.75% | 8 | 66.67% |
Zou Nan hai | 27 | 42.19% | 2 | 16.67% |
John Harrison | 9 | 14.06% | 2 | 16.67% |
Total | 64 | 100.00% | 12 | 100.00% |
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
engine->irq_keep_mask));
gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 24 | 53.33% | 5 | 38.46% |
Ben Widawsky | 8 | 17.78% | 1 | 7.69% |
Tvrtko A. Ursulin | 5 | 11.11% | 1 | 7.69% |
Daniel Vetter | 3 | 6.67% | 2 | 15.38% |
Paulo Zanoni | 2 | 4.44% | 1 | 7.69% |
Zou Nan hai | 1 | 2.22% | 1 | 7.69% |
Oscar Mateo | 1 | 2.22% | 1 | 7.69% |
Jani Nikula | 1 | 2.22% | 1 | 7.69% |
Total | 45 | 100.00% | 13 | 100.00% |
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 24 | 61.54% | 5 | 41.67% |
Tvrtko A. Ursulin | 4 | 10.26% | 1 | 8.33% |
Ben Widawsky | 4 | 10.26% | 1 | 8.33% |
Daniel Vetter | 3 | 7.69% | 2 | 16.67% |
Paulo Zanoni | 2 | 5.13% | 1 | 8.33% |
Oscar Mateo | 1 | 2.56% | 1 | 8.33% |
Jani Nikula | 1 | 2.56% | 1 | 8.33% |
Total | 39 | 100.00% | 12 | 100.00% |
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 27 | 69.23% | 1 | 12.50% |
Chris Wilson | 5 | 12.82% | 3 | 37.50% |
Tvrtko A. Ursulin | 4 | 10.26% | 1 | 12.50% |
Oscar Mateo | 1 | 2.56% | 1 | 12.50% |
Paulo Zanoni | 1 | 2.56% | 1 | 12.50% |
Akash Goel | 1 | 2.56% | 1 | 12.50% |
Total | 39 | 100.00% | 8 | 100.00% |
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~0);
gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 28 | 75.68% | 1 | 14.29% |
Tvrtko A. Ursulin | 3 | 8.11% | 1 | 14.29% |
Chris Wilson | 3 | 8.11% | 2 | 28.57% |
Oscar Mateo | 1 | 2.70% | 1 | 14.29% |
Paulo Zanoni | 1 | 2.70% | 1 | 14.29% |
Akash Goel | 1 | 2.70% | 1 | 14.29% |
Total | 37 | 100.00% | 7 | 100.00% |
static void
gen8_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
engine->irq_keep_mask));
POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 33 | 71.74% | 2 | 28.57% |
Chris Wilson | 7 | 15.22% | 3 | 42.86% |
Tvrtko A. Ursulin | 5 | 10.87% | 1 | 14.29% |
Oscar Mateo | 1 | 2.17% | 1 | 14.29% |
Total | 46 | 100.00% | 7 | 100.00% |
static void
gen8_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 21 | 70.00% | 1 | 16.67% |
Chris Wilson | 5 | 16.67% | 3 | 50.00% |
Tvrtko A. Ursulin | 3 | 10.00% | 1 | 16.67% |
Oscar Mateo | 1 | 3.33% | 1 | 16.67% |
Total | 30 | 100.00% | 6 | 100.00% |
static int
i965_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned int dispatch_flags)
{
struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Zou Nan hai | 36 | 43.37% | 1 | 7.14% |
Chris Wilson | 34 | 40.96% | 8 | 57.14% |
John Harrison | 11 | 13.25% | 3 | 21.43% |
Ben Widawsky | 2 | 2.41% | 2 | 14.29% |
Total | 83 | 100.00% | 14 | 100.00% |
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
struct intel_ring *ring = req->ring;
u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
/* Evict the invalid PTE TLBs */
intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
intel_ring_emit(ring, cs_offset);
intel_ring_emit(ring, 0xdeadbeef);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
ret = intel_ring_begin(req, 6 + 2);
if (ret)
return ret;
/* Blit the batch (which now has all relocs applied) to the
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
intel_ring_emit(ring,
BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
intel_ring_emit(ring, cs_offset);
intel_ring_emit(ring, 4096);
intel_ring_emit(ring, offset);
intel_ring_emit(ring, MI_FLUSH);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
/* ... and execute it. */
offset = cs_offset;
}
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 121 | 41.44% | 10 | 50.00% |
Daniel Vetter | 113 | 38.70% | 2 | 10.00% |
Zou Nan hai | 25 | 8.56% | 1 | 5.00% |
John Harrison | 14 | 4.79% | 3 | 15.00% |
Eric Anholt | 13 | 4.45% | 1 | 5.00% |
Ville Syrjälä | 4 | 1.37% | 1 | 5.00% |
Ben Widawsky | 1 | 0.34% | 1 | 5.00% |
Tvrtko A. Ursulin | 1 | 0.34% | 1 | 5.00% |
Total | 292 | 100.00% | 20 | 100.00% |
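The workaround above copies the batch into the scratch bo as a SRC_COPY blit that is 4096 bytes wide and DIV_ROUND_UP(len, 4096) rows tall. A standalone sketch of that geometry (not driver code; the batch length is invented):
#include <stdio.h>
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
int main(void)
{
	unsigned int len = 150 * 1024;	/* invented batch length, under I830_BATCH_LIMIT */
	unsigned int rows = DIV_ROUND_UP(len, 4096);
	printf("blit %u rows x 4096 bytes (%u bytes) to the scratch bo\n",
	       rows, rows * 4096);
	return 0;
}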
static int
i915_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 27 | 32.53% | 9 | 56.25% |
Zou Nan hai | 22 | 26.51% | 1 | 6.25% |
Daniel Vetter | 14 | 16.87% | 1 | 6.25% |
John Harrison | 11 | 13.25% | 3 | 18.75% |
Eric Anholt | 8 | 9.64% | 1 | 6.25% |
Ben Widawsky | 1 | 1.20% | 1 | 6.25% |
Total | 83 | 100.00% | 16 | 100.00% |
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah)
return;
drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
engine->status_page.page_addr = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ville Syrjälä | 41 | 85.42% | 1 | 25.00% |
Chris Wilson | 4 | 8.33% | 2 | 50.00% |
Tvrtko A. Ursulin | 3 | 6.25% | 1 | 25.00% |
Total | 48 | 100.00% | 4 | 100.00% |
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
obj = vma->obj;
i915_vma_unpin(vma);
i915_vma_close(vma);
i915_gem_object_unpin_map(obj);
__i915_gem_object_release_unless_active(obj);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 32 | 49.23% | 3 | 42.86% |
Zou Nan hai | 15 | 23.08% | 1 | 14.29% |
Eric Anholt | 15 | 23.08% | 1 | 14.29% |
Tvrtko A. Ursulin | 2 | 3.08% | 1 | 14.29% |
Oscar Mateo | 1 | 1.54% | 1 | 14.29% |
Total | 65 | 100.00% | 7 | 100.00% |
static int init_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags;
void *vaddr;
int ret;
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
}
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
if (ret)
goto err;
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
flags = PIN_GLOBAL;
if (!HAS_LLC(engine->i915))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
* gen4, gen5, or byt, they also behave similarly
* and hang if the HWS is placed at the top of the
* GTT. To generalise, it appears that all !llc
* platforms have issues with us placing the HWS
* above the mappable region (even though we never
* actually map it).
*/
flags |= PIN_MAPPABLE;
ret = i915_vma_pin(vma, 0, 4096, flags);
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto err_unpin;
}
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
return 0;
err_unpin:
i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 166 | 66.14% | 15 | 68.18% |
Eric Anholt | 34 | 13.55% | 1 | 4.55% |
Zou Nan hai | 33 | 13.15% | 1 | 4.55% |
Daniel Vetter | 9 | 3.59% | 2 | 9.09% |
Tvrtko A. Ursulin | 7 | 2.79% | 1 | 4.55% |
Oscar Mateo | 1 | 0.40% | 1 | 4.55% |
Ville Syrjälä | 1 | 0.40% | 1 | 4.55% |
Total | 251 | 100.00% | 22 | 100.00% |
static int init_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->status_page_dmah =
drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(engine->status_page.page_addr, 0, PAGE_SIZE);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 69 | 92.00% | 3 | 50.00% |
Tvrtko A. Ursulin | 4 | 5.33% | 1 | 16.67% |
Oscar Mateo | 1 | 1.33% | 1 | 16.67% |
Daniel Vetter | 1 | 1.33% | 1 | 16.67% |
Total | 75 | 100.00% | 6 | 100.00% |
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
{
unsigned int flags;
enum i915_map_type map;
struct i915_vma *vma = ring->vma;
void *addr;
int ret;
GEM_BUG_ON(ring->vaddr);
map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
flags = PIN_GLOBAL;
if (offset_bias)
flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
else
ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
if (unlikely(ret))
return ret;
}
ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
if (unlikely(ret))
return ret;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
addr = i915_gem_object_pin_map(vma->obj, map);
if (IS_ERR(addr))
goto err;
ring->vaddr = addr;
return 0;
err:
i915_vma_unpin(vma);
return PTR_ERR(addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 180 | 78.95% | 17 | 73.91% |
Daniele Ceraolo Spurio | 18 | 7.89% | 1 | 4.35% |
Dave Gordon | 11 | 4.82% | 1 | 4.35% |
Thomas Daniel | 7 | 3.07% | 1 | 4.35% |
Zou Nan hai | 6 | 2.63% | 1 | 4.35% |
Eric Anholt | 5 | 2.19% | 1 | 4.35% |
Tvrtko A. Ursulin | 1 | 0.44% | 1 | 4.35% |
Total | 228 | 100.00% | 23 | 100.00% |
void intel_ring_unpin(struct intel_ring *ring)
{
GEM_BUG_ON(!ring->vma);
GEM_BUG_ON(!ring->vaddr);
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
i915_gem_object_unpin_map(ring->vma->obj);
ring->vaddr = NULL;
i915_vma_unpin(ring->vma);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 65 | 100.00% | 3 | 100.00% |
Total | 65 | 100.00% | 3 | 100.00% |
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
obj = i915_gem_object_create_stolen(dev_priv, size);
if (!obj)
obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
return vma;
err:
i915_gem_object_put(obj);
return vma;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 66 | 61.11% | 6 | 66.67% |
Thomas Daniel | 40 | 37.04% | 1 | 11.11% |
Dave Gordon | 1 | 0.93% | 1 | 11.11% |
Zou Nan hai | 1 | 0.93% | 1 | 11.11% |
Total | 108 | 100.00% | 9 | 100.00% |
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
struct intel_ring *ring;
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return ERR_PTR(-ENOMEM);
ring->engine = engine;
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
ring->effective_size = size;
if (IS_I830(engine->i915) || IS_I845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
ring->last_retired_head = -1;
intel_ring_update_space(ring);
vma = intel_ring_create_vma(engine->i915, size);
if (IS_ERR(vma)) {
kfree(ring);
return ERR_CAST(vma);
}
ring->vma = vma;
return ring;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 145 | 83.82% | 9 | 64.29% |
Oscar Mateo | 25 | 14.45% | 2 | 14.29% |
Tvrtko A. Ursulin | 1 | 0.58% | 1 | 7.14% |
Daniel Vetter | 1 | 0.58% | 1 | 7.14% |
Jani Nikula | 1 | 0.58% | 1 | 7.14% |
Total | 173 | 100.00% | 14 | 100.00% |
void
intel_ring_free(struct intel_ring *ring)
{
struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
__i915_gem_object_release_unless_active(obj);
kfree(ring);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 38 | 100.00% | 4 | 100.00% |
Total | 38 | 100.00% | 4 | 100.00% |
static int context_pin(struct i915_gem_context *ctx, unsigned int flags)
{
struct i915_vma *vma = ctx->engine[RCS].state;
int ret;
/* Clear this page out of any CPU caches for coherent swap-in/out.
* We only want to do this on the first bind so that we do not stall
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
if (ret)
return ret;
}
return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 80 | 100.00% | 2 | 100.00% |
Total | 80 | 100.00% | 2 | 100.00% |
static int intel_ring_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
int ret;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
if (ce->pin_count++)
return 0;
if (ce->state) {
unsigned int flags;
flags = 0;
if (i915_gem_context_is_kernel(ctx))
flags = PIN_HIGH;
ret = context_pin(ctx, flags);
if (ret)
goto error;
ce->state->obj->mm.dirty = true;
}
/* The kernel context is only used as a placeholder for flushing the
* active context. It is never used for submitting user rendering and
* as such never requires the golden render context, and so we can skip
* emitting it when we switch to the kernel context. This is required
* as during eviction we cannot allocate and pin the renderstate in
* order to initialise the context.
*/
if (i915_gem_context_is_kernel(ctx))
ce->initialised = true;
i915_gem_context_get(ctx);
return 0;
error:
ce->pin_count = 0;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 144 | 100.00% | 9 | 100.00% |
Total | 144 | 100.00% | 9 | 100.00% |
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(ce->pin_count == 0);
if (--ce->pin_count)
return;
if (ce->state)
i915_vma_unpin(ce->state);
i915_gem_context_put(ctx);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 78 | 100.00% | 5 | 100.00% |
Total | 78 | 100.00% | 5 | 100.00% |
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring;
int ret;
WARN_ON(engine->buffer);
intel_engine_setup_common(engine);
ret = intel_engine_init_common(engine);
if (ret)
goto error;
ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error;
}
if (HWS_NEEDS_PHYSICAL(dev_priv)) {
WARN_ON(engine->id != RCS);
ret = init_phys_status_page(engine);
if (ret)
goto error;
} else {
ret = init_status_page(engine);
if (ret)
goto error;
}
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
if (ret) {
intel_ring_free(ring);
goto error;
}
engine->buffer = ring;
return 0;
error:
intel_engine_cleanup(engine);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 91 | 51.70% | 12 | 52.17% |
Thomas Daniel | 17 | 9.66% | 1 | 4.35% |
Oscar Mateo | 16 | 9.09% | 1 | 4.35% |
Tvrtko A. Ursulin | 13 | 7.39% | 3 | 13.04% |
Carlos Santa | 12 | 6.82% | 1 | 4.35% |
Dave Gordon | 9 | 5.11% | 1 | 4.35% |
Zou Nan hai | 9 | 5.11% | 1 | 4.35% |
Eric Anholt | 6 | 3.41% | 1 | 4.35% |
Daniele Ceraolo Spurio | 2 | 1.14% | 1 | 4.35% |
Ben Widawsky | 1 | 0.57% | 1 | 4.35% |
Total | 176 | 100.00% | 23 | 100.00% |
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
dev_priv = engine->i915;
if (engine->buffer) {
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(I915_READ_MODE(engine) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
intel_ring_free(engine->buffer);
engine->buffer = NULL;
}
if (engine->cleanup)
engine->cleanup(engine);
if (HWS_NEEDS_PHYSICAL(dev_priv)) {
WARN_ON(engine->id != RCS);
cleanup_phys_status_page(engine);
} else {
cleanup_status_page(engine);
}
intel_engine_cleanup_common(engine);
engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 41 | 29.08% | 10 | 43.48% |
Ville Syrjälä | 22 | 15.60% | 2 | 8.70% |
Zou Nan hai | 18 | 12.77% | 2 | 8.70% |
Akash Goel | 16 | 11.35% | 1 | 4.35% |
Tvrtko A. Ursulin | 13 | 9.22% | 1 | 4.35% |
Dave Gordon | 11 | 7.80% | 1 | 4.35% |
Carlos Santa | 9 | 6.38% | 1 | 4.35% |
John Harrison | 4 | 2.84% | 1 | 4.35% |
Thomas Daniel | 3 | 2.13% | 1 | 4.35% |
Eric Anholt | 2 | 1.42% | 1 | 4.35% |
Ben Widawsky | 1 | 0.71% | 1 | 4.35% |
Oscar Mateo | 1 | 0.71% | 1 | 4.35% |
Total | 141 | 100.00% | 23 | 100.00% |
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
engine->buffer->head = engine->buffer->tail;
engine->buffer->last_retired_head = -1;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 44 | 88.00% | 1 | 50.00% |
Akash Goel | 6 | 12.00% | 1 | 50.00% |
Total | 50 | 100.00% | 2 | 100.00% |
static int ring_request_alloc(struct drm_i915_gem_request *request)
{
int ret;
GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
GEM_BUG_ON(!request->engine->buffer);
request->ring = request->engine->buffer;
ret = intel_ring_begin(request, 0);
if (ret)
return ret;
request->reserved_space -= LEGACY_REQUEST_SIZE;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 71 | 83.53% | 7 | 53.85% |
John Harrison | 13 | 15.29% | 5 | 38.46% |
Tvrtko A. Ursulin | 1 | 1.18% | 1 | 7.69% |
Total | 85 | 100.00% | 13 | 100.00% |
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
long timeout;
lockdep_assert_held(&req->i915->drm.struct_mutex);
intel_ring_update_space(ring);
if (ring->space >= bytes)
return 0;
/*
* Space is reserved in the ringbuffer for finalising the request,
* as that cannot be allowed to fail. During request finalisation,
* reserved_space is set to 0 to stop the overallocation and the
* assumption is that then we never need to wait (which has the
* risk of failing with EINTR).
*
* See also i915_gem_request_alloc() and i915_add_request().
*/
GEM_BUG_ON(!req->reserved_space);
list_for_each_entry(target, &ring->request_list, ring_link) {
unsigned space;
/* Would completion of this request free enough space? */
space = __intel_ring_space(target->postfix, ring->tail,
ring->size);
if (space >= bytes)
break;
}
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
timeout = i915_wait_request(target,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
i915_gem_request_retire_upto(target);
intel_ring_update_space(ring);
GEM_BUG_ON(ring->space < bytes);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 153 | 90.53% | 10 | 83.33% |
John Harrison | 16 | 9.47% | 2 | 16.67% |
Total | 169 | 100.00% | 12 | 100.00% |
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
struct intel_ring *ring = req->ring;
int remain_actual = ring->size - ring->tail;
int remain_usable = ring->effective_size - ring->tail;
int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes;
bool need_wrap = false;
total_bytes = bytes + req->reserved_space;
if (unlikely(bytes > remain_usable)) {
/*
* Not enough space for the basic request. So need to flush
* out the remainder and then wait for base + reserved.
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
} else if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
* falls off the end. So we don't need an immediate wrap
* and only need to effectively wait for the reserved
* size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + req->reserved_space;
} else {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
}
if (wait_bytes > ring->space) {
int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
}
if (unlikely(need_wrap)) {
GEM_BUG_ON(remain_actual > ring->space);
GEM_BUG_ON(ring->tail + remain_actual > ring->size);
/* Fill the tail with MI_NOOP */
memset(ring->vaddr + ring->tail, 0, remain_actual);
ring->tail = 0;
ring->space -= remain_actual;
}
ring->space -= bytes;
GEM_BUG_ON(ring->space < 0);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 131 | 56.96% | 8 | 53.33% |
John Harrison | 68 | 29.57% | 2 | 13.33% |
Zou Nan hai | 20 | 8.70% | 1 | 6.67% |
Oscar Mateo | 6 | 2.61% | 2 | 13.33% |
Mika Kuoppala | 4 | 1.74% | 1 | 6.67% |
Tvrtko A. Ursulin | 1 | 0.43% | 1 | 6.67% |
Total | 230 | 100.00% | 15 | 100.00% |
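To make the wrap accounting in intel_ring_begin() concrete, here is a small standalone sketch (not driver code) that walks the same decision tree with plain integers; the ring geometry and request size below are invented for illustration.
#include <stdbool.h>
#include <stdio.h>
int main(void)
{
	/* invented ring geometry, mirroring ring->size/effective_size/tail */
	int size = 32 * 4096;
	int effective_size = size - 128;	/* i830 erratum: last 2 cachelines unusable */
	int tail = effective_size - 16;
	int reserved_space = 200;		/* cf. LEGACY_REQUEST_SIZE */
	int bytes = 10 * 4;			/* 10 dwords requested */
	int remain_actual = size - tail;
	int remain_usable = effective_size - tail;
	int total_bytes = bytes + reserved_space;
	bool need_wrap = false;
	int wait_bytes;
	if (bytes > remain_usable) {
		/* request itself will not fit before the usable end: wrap */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (total_bytes > remain_usable) {
		/* request fits but the reservation spills over the end */
		wait_bytes = remain_actual + reserved_space;
	} else {
		wait_bytes = total_bytes;
	}
	printf("need_wrap=%d, wait for %d free bytes\n", need_wrap, wait_bytes);
	return 0;
}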
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
int num_dwords =
(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
return 0;
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
ret = intel_ring_begin(req, num_dwords);
if (ret)
return ret;
while (num_dwords--)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ville Syrjälä | 67 | 68.37% | 1 | 12.50% |
Chris Wilson | 21 | 21.43% | 4 | 50.00% |
John Harrison | 9 | 9.18% | 2 | 25.00% |
Oscar Mateo | 1 | 1.02% | 1 | 12.50% |
Total | 98 | 100.00% | 8 | 100.00% |
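A quick standalone check of the padding arithmetic above, assuming the driver's usual 64-byte CACHELINE_BYTES; the tail value is invented:
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	const unsigned int cacheline = 64;	/* assumed CACHELINE_BYTES */
	unsigned int tail = 0x1234;		/* invented ring->tail, in bytes */
	unsigned int num_dwords = (tail & (cacheline - 1)) / sizeof(uint32_t);
	if (num_dwords)
		num_dwords = cacheline / sizeof(uint32_t) - num_dwords;
	printf("tail=0x%x -> pad with %u MI_NOOPs, new tail=0x%x\n",
	       tail, num_dwords, tail + num_dwords * 4);
	return 0;
}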
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
if (intel_wait_for_register_fw(dev_priv,
GEN6_BSD_SLEEP_PSMI_CONTROL,
GEN6_BSD_SLEEP_INDICATOR,
0,
50))
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
i9xx_submit_request(request);
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 48 | 52.17% | 7 | 63.64% |
Xiang, Haihao | 41 | 44.57% | 2 | 18.18% |
Jani Nikula | 2 | 2.17% | 1 | 9.09% |
Daniel Vetter | 1 | 1.09% | 1 | 9.09% |
Total | 92 | 100.00% | 11 | 100.00% |
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
uint32_t cmd;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 63 | 40.91% | 9 | 60.00% |
Ben Widawsky | 40 | 25.97% | 2 | 13.33% |
Xiang, Haihao | 38 | 24.68% | 1 | 6.67% |
John Harrison | 9 | 5.84% | 2 | 13.33% |
Jesse Barnes | 4 | 2.60% | 1 | 6.67% |
Total | 154 | 100.00% | 15 | 100.00% |
static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
struct intel_ring *ring = req->ring;
bool ppgtt = USES_PPGTT(req->i915) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 87 | 69.60% | 3 | 23.08% |
Chris Wilson | 12 | 9.60% | 4 | 30.77% |
John Harrison | 11 | 8.80% | 3 | 23.08% |
Abdiel Janulgue | 10 | 8.00% | 1 | 7.69% |
Daniel Vetter | 3 | 2.40% | 1 | 7.69% |
Xiang, Haihao | 2 | 1.60% | 1 | 7.69% |
Total | 125 | 100.00% | 13 | 100.00% |
static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 70 | 74.47% | 6 | 50.00% |
John Harrison | 11 | 11.70% | 3 | 25.00% |
Abdiel Janulgue | 10 | 10.64% | 1 | 8.33% |
Ben Widawsky | 3 | 3.19% | 2 | 16.67% |
Total | 94 | 100.00% | 12 | 100.00% |
static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 54 | 65.85% | 8 | 61.54% |
Xiang, Haihao | 16 | 19.51% | 1 | 7.69% |
John Harrison | 11 | 13.41% | 3 | 23.08% |
Ben Widawsky | 1 | 1.22% | 1 | 7.69% |
Total | 82 | 100.00% | 13 | 100.00% |
/* Blitter support (SandyBridge+) */
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
struct intel_ring *ring = req->ring;
uint32_t cmd;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
intel_ring_advance(ring);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 61 | 40.13% | 8 | 50.00% |
Zou Nan hai | 39 | 25.66% | 1 | 6.25% |
Ben Widawsky | 38 | 25.00% | 2 | 12.50% |
John Harrison | 8 | 5.26% | 2 | 12.50% |
Jesse Barnes | 4 | 2.63% | 1 | 6.25% |
Rodrigo Vivi | 1 | 0.66% | 1 | 6.25% |
Daniel Vetter | 1 | 0.66% | 1 | 6.25% |
Total | 152 | 100.00% | 16 | 100.00% |
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
int ret, i;
if (!i915.semaphores)
return;
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
struct i915_vma *vma;
obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
if (IS_ERR(obj))
goto err;
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err_obj;
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
goto err_obj;
ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
goto err_obj;
dev_priv->semaphore = vma;
}
if (INTEL_GEN(dev_priv) >= 8) {
u32 offset = i915_ggtt_offset(dev_priv->semaphore);
engine->semaphore.sync_to = gen8_ring_sync_to;
engine->semaphore.signal = gen8_xcs_signal;
for (i = 0; i < I915_NUM_ENGINES; i++) {
u32 ring_offset;
if (i != engine->id)
ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
else
ring_offset = MI_SEMAPHORE_SYNC_INVALID;
engine->semaphore.signal_ggtt[i] = ring_offset;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->semaphore.sync_to = gen6_ring_sync_to;
engine->semaphore.signal = gen6_signal;
/*
* The current semaphore scheme is only used on pre-gen8
* platforms, and there is no VCS2 ring on those platforms,
* so the semaphore between RCS and VCS2 is initialized as
* INVALID. Gen8 will initialize the semaphore between VCS2
* and RCS later.
*/
for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
static const struct {
u32 wait_mbox;
i915_reg_t mbox_reg;
} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
[RCS_HW] = {
[VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
[BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
},
[VCS_HW] = {
[RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
[BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
},
[BCS_HW] = {
[RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
[VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
},
[VECS_HW] = {
[RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
[VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
[BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
},
};
u32 wait_mbox;
i915_reg_t mbox_reg;
if (i == engine->hw_id) {
wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
mbox_reg = GEN6_NOSYNC;
} else {
wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
}
engine->semaphore.mbox.wait[i] = wait_mbox;
engine->semaphore.mbox.signal[i] = mbox_reg;
}
}
return;
err_obj:
i915_gem_object_put(obj);
err:
DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
i915.semaphores = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tvrtko A. Ursulin | 455 | 73.51% | 9 | 30.00% |
Chris Wilson | 93 | 15.02% | 10 | 33.33% |
Ben Widawsky | 43 | 6.95% | 6 | 20.00% |
Daniel Vetter | 17 | 2.75% | 2 | 6.67% |
Yakui Zhao | 9 | 1.45% | 1 | 3.33% |
Arun Siluvery | 1 | 0.16% | 1 | 3.33% |
Xiang, Haihao | 1 | 0.16% | 1 | 3.33% |
Total | 619 | 100.00% | 30 | 100.00% |
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable = gen8_irq_enable;
engine->irq_disable = gen8_irq_disable;
engine->irq_seqno_barrier = gen6_seqno_barrier;
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
engine->irq_seqno_barrier = gen6_seqno_barrier;
} else if (INTEL_GEN(dev_priv) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
engine->irq_seqno_barrier = gen5_seqno_barrier;
} else if (INTEL_GEN(dev_priv) >= 3) {
engine->irq_enable = i9xx_irq_enable;
engine->irq_disable = i9xx_irq_disable;
} else {
engine->irq_enable = i8xx_irq_enable;
engine->irq_disable = i8xx_irq_disable;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 99 | 64.29% | 8 | 50.00% |
Tvrtko A. Ursulin | 35 | 22.73% | 3 | 18.75% |
Daniel Vetter | 19 | 12.34% | 4 | 25.00% |
Mika Kuoppala | 1 | 0.65% | 1 | 6.25% |
Total | 154 | 100.00% | 16 | 100.00% |
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
intel_ring_init_irq(dev_priv, engine);
intel_ring_init_semaphores(dev_priv, engine);
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
engine->context_pin = intel_ring_context_pin;
engine->context_unpin = intel_ring_context_unpin;
engine->request_alloc = ring_request_alloc;
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
if (i915.semaphores) {
int num_rings;
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
if (INTEL_GEN(dev_priv) >= 8) {
engine->emit_breadcrumb_sz += num_rings * 6;
} else {
engine->emit_breadcrumb_sz += num_rings * 3;
if (num_rings & 1)
engine->emit_breadcrumb_sz++;
}
}
engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
engine->emit_bb_start = gen8_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = i915_emit_bb_start;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 176 | 78.92% | 12 | 66.67% |
Tvrtko A. Ursulin | 31 | 13.90% | 2 | 11.11% |
Daniel Vetter | 14 | 6.28% | 2 | 11.11% |
Jani Nikula | 1 | 0.45% | 1 | 5.56% |
Xiang, Haihao | 1 | 0.45% | 1 | 5.56% |
Total | 223 | 100.00% | 18 | 100.00% |
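The emit_breadcrumb_sz bookkeeping above can be hard to follow inline; here is a standalone sketch of just that arithmetic (not driver code), with an invented base size and ring count. The bump for an odd ring count presumably keeps the dword total even, since the tail must stay qword-aligned.
#include <stdio.h>
int main(void)
{
	int base_sz = 4;	/* invented stand-in for i9xx_emit_breadcrumb_sz */
	int num_rings = 3;	/* engines to signal besides this one */
	int gen;
	for (gen = 6; gen <= 8; gen += 2) {
		int sz = base_sz;
		if (gen >= 8) {
			sz += num_rings * 6;
		} else {
			sz += num_rings * 3;
			if (num_rings & 1)
				sz++;	/* presumably pads the total to an even dword count */
		}
		printf("gen%d: emit_breadcrumb_sz = %d dwords\n", gen, sz);
	}
	return 0;
}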
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
intel_ring_default_vfuncs(dev_priv, engine);
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
if (i915.semaphores) {
int num_rings;
engine->semaphore.signal = gen8_rcs_signal;
num_rings =
hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
engine->emit_breadcrumb_sz += num_rings * 6;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
if (IS_GEN6(dev_priv))
engine->emit_flush = gen6_render_ring_flush;
} else if (IS_GEN5(dev_priv)) {
engine->emit_flush = gen4_render_ring_flush;
} else {
if (INTEL_GEN(dev_priv) < 4)
engine->emit_flush = gen2_render_ring_flush;
else
engine->emit_flush = gen4_render_ring_flush;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
if (IS_HASWELL(dev_priv))
engine->emit_bb_start = hsw_emit_bb_start;
engine->init_hw = init_render_ring;
engine->cleanup = render_ring_cleanup;
ret = intel_init_ring_buffer(engine);
if (ret)
return ret;
if (INTEL_GEN(dev_priv) >= 6) {
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
if (ret)
return ret;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tvrtko A. Ursulin | 137 | 46.13% | 4 | 13.33% |
Chris Wilson | 85 | 28.62% | 14 | 46.67% |
Daniel Vetter | 37 | 12.46% | 3 | 10.00% |
Xiang, Haihao | 13 | 4.38% | 1 | 3.33% |
Yakui Zhao | 11 | 3.70% | 1 | 3.33% |
Ben Widawsky | 10 | 3.37% | 5 | 16.67% |
Jesse Barnes | 2 | 0.67% | 1 | 3.33% |
Jani Nikula | 2 | 0.67% | 1 | 3.33% |
Total | 297 | 100.00% | 30 | 100.00% |
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
if (INTEL_GEN(dev_priv) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev_priv))
engine->submit_request = gen6_bsd_submit_request;
engine->emit_flush = gen6_bsd_ring_flush;
if (INTEL_GEN(dev_priv) < 8)
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
} else {
engine->mmio_base = BSD_RING_BASE;
engine->emit_flush = bsd_ring_flush;
if (IS_GEN5(dev_priv))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
}
return intel_init_ring_buffer(engine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tvrtko A. Ursulin | 68 | 60.18% | 5 | 33.33% |
Chris Wilson | 18 | 15.93% | 6 | 40.00% |
Ben Widawsky | 16 | 14.16% | 2 | 13.33% |
Daniel Vetter | 9 | 7.96% | 1 | 6.67% |
Jani Nikula | 2 | 1.77% | 1 | 6.67% |
Total | 113 | 100.00% | 15 | 100.00% |
/**
* Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
*/
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_bsd_ring_flush;
return intel_init_ring_buffer(engine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tvrtko A. Ursulin | 27 | 71.05% | 3 | 37.50% |
Ben Widawsky | 9 | 23.68% | 3 | 37.50% |
Chris Wilson | 2 | 5.26% | 2 | 25.00% |
Total | 38 | 100.00% | 8 | 100.00% |
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
if (INTEL_GEN(dev_priv) < 8)
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
return intel_init_ring_buffer(engine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Widawsky | 27 | 50.94% | 3 | 27.27% |
Tvrtko A. Ursulin | 21 | 39.62% | 5 | 45.45% |
Chris Wilson | 3 | 5.66% | 2 | 18.18% |
Jani Nikula | 2 | 3.77% | 1 | 9.09% |
Total | 53 | 100.00% | 11 | 100.00% |
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
if (INTEL_GEN(dev_priv) < 8) {
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
engine->irq_enable = hsw_vebox_irq_enable;
engine->irq_disable = hsw_vebox_irq_disable;
}
return intel_init_ring_buffer(engine);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tvrtko A. Ursulin | 41 | 61.19% | 4 | 40.00% |
Ben Widawsky | 11 | 16.42% | 2 | 20.00% |
Chris Wilson | 10 | 14.93% | 3 | 30.00% |
Yakui Zhao | 5 | 7.46% | 1 | 10.00% |
Total | 67 | 100.00% | 10 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Wilson | 4404 | 40.86% | 160 | 38.55% |
Tvrtko A. Ursulin | 1054 | 9.78% | 20 | 4.82% |
Ben Widawsky | 822 | 7.63% | 27 | 6.51% |
Mika Kuoppala | 642 | 5.96% | 18 | 4.34% |
Arun Siluvery | 513 | 4.76% | 26 | 6.27% |
Daniel Vetter | 509 | 4.72% | 33 | 7.95% |
Damien Lespiau | 429 | 3.98% | 12 | 2.89% |
Zou Nan hai | 340 | 3.15% | 4 | 0.96% |
John Harrison | 313 | 2.90% | 20 | 4.82% |
Jesse Barnes | 267 | 2.48% | 5 | 1.20% |
Paulo Zanoni | 211 | 1.96% | 5 | 1.20% |
Ville Syrjälä | 184 | 1.71% | 12 | 2.89% |
Eric Anholt | 181 | 1.68% | 1 | 0.24% |
Xiang, Haihao | 116 | 1.08% | 3 | 0.72% |
Kenneth Graunke | 110 | 1.02% | 3 | 0.72% |
Nick Hoath | 108 | 1.00% | 10 | 2.41% |
Oscar Mateo | 83 | 0.77% | 6 | 1.45% |
Dave Gordon | 78 | 0.72% | 7 | 1.69% |
Thomas Daniel | 67 | 0.62% | 1 | 0.24% |
Jani Nikula | 58 | 0.54% | 6 | 1.45% |
Akash Goel | 38 | 0.35% | 4 | 0.96% |
Tim Gore | 33 | 0.31% | 5 | 1.20% |
Naresh Kumar Kachhi | 28 | 0.26% | 2 | 0.48% |
Yakui Zhao | 25 | 0.23% | 1 | 0.24% |
Imre Deak | 25 | 0.23% | 5 | 1.20% |
Carlos Santa | 24 | 0.22% | 1 | 0.24% |
Daniele Ceraolo Spurio | 20 | 0.19% | 1 | 0.24% |
Abdiel Janulgue | 20 | 0.19% | 1 | 0.24% |
Zhenyu Wang | 16 | 0.15% | 1 | 0.24% |
Rodrigo Vivi | 12 | 0.11% | 3 | 0.72% |
Francisco Jerez | 8 | 0.07% | 1 | 0.24% |
Robert Beckett | 7 | 0.06% | 1 | 0.24% |
Konrad Zapalowicz | 7 | 0.06% | 1 | 0.24% |
Matthew Auld | 6 | 0.06% | 1 | 0.24% |
Zeng Zhaoxiu | 5 | 0.05% | 1 | 0.24% |
Jiri Kosina | 5 | 0.05% | 1 | 0.24% |
Sean Paul | 3 | 0.03% | 1 | 0.24% |
Deepak S | 2 | 0.02% | 1 | 0.24% |
David Howells | 2 | 0.02% | 1 | 0.24% |
Michel Thierry | 1 | 0.01% | 1 | 0.24% |
Fengguang Wu | 1 | 0.01% | 1 | 0.24% |
Tomas Elf | 1 | 0.01% | 1 | 0.24% |
Total | 10778 | 100.00% | 415 | 100.00% |