Release 4.7 drivers/gpu/drm/i915/i915_drv.c
  
  
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }
#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
#define BDW_COLORS \
	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};
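The `.num_pipes = 0` override above works because, with C99 designated initializers, the last initializer for a given member wins, so a field written after a feature macro quietly replaces the macro's value (GCC only diagnoses this under -Wextra/-Woverride-init, which kernel builds do not enable). A minimal standalone illustration, using a hypothetical struct that is not part of the driver:
	struct example_info { int gen; int num_pipes; };
	#define EXAMPLE_FEATURES .gen = 7, .num_pipes = 3

	static const struct example_info example = {
		EXAMPLE_FEATURES,
		.num_pipes = 0,	/* overrides the macro's .num_pipes = 3 */
	};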
#define VLV_FEATURES  \
	.gen = 7, .num_pipes = 2, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS
static const struct intel_device_info intel_valleyview_m_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
	.is_mobile = 1,
};
static const struct intel_device_info intel_valleyview_d_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
};
#define HSW_FEATURES  \
	GEN7_FEATURES, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
	.has_ddi = 1, \
	.has_fpga_dbg = 1
static const struct intel_device_info intel_haswell_d_info = {
	HSW_FEATURES,
	.is_haswell = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
	HSW_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
};
#define BDW_FEATURES \
	HSW_FEATURES, \
	BDW_COLORS
static const struct intel_device_info intel_broadwell_d_info = {
	BDW_FEATURES,
	.gen = 8,
};
static const struct intel_device_info intel_broadwell_m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
};
static const struct intel_device_info intel_broadwell_gt3d_info = {
	BDW_FEATURES,
	.gen = 8,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broadwell_gt3m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_cherryview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
	CHV_COLORS,
};
static const struct intel_device_info intel_skylake_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
};
static const struct intel_device_info intel_skylake_gt3_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.is_broxton = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
	BDW_COLORS,
};
static const struct intel_device_info intel_kabylake_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I830_IDS(&intel_i830_info),
	INTEL_I845G_IDS(&intel_845g_info),
	INTEL_I85X_IDS(&intel_i85x_info),
	INTEL_I865G_IDS(&intel_i865g_info),
	INTEL_I915G_IDS(&intel_i915g_info),
	INTEL_I915GM_IDS(&intel_i915gm_info),
	INTEL_I945G_IDS(&intel_i945g_info),
	INTEL_I945GM_IDS(&intel_i945gm_info),
	INTEL_I965G_IDS(&intel_i965g_info),
	INTEL_G33_IDS(&intel_g33_info),
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
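Each INTEL_*_IDS() macro comes from include/drm/i915_pciids.h and expands to one or more struct pci_device_id entries whose driver_data field carries the matching intel_device_info pointer; i915_pci_probe() below casts it back out of ent->driver_data. A sketch of the shape of the underlying per-device entry (from memory, not copied verbatim; the header is authoritative):
	#define EXAMPLE_VGA_DEVICE(id, info) {		\
		0x8086, id,				\
		~0, ~0,		/* any subvendor/subdevice */	\
		0x030000, 0xff0000,	/* match display class */	\
		(unsigned long) (info) }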
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;
	/*
         * In a virtualized passthrough environment we can be in a
         * setup where the ISA bridge is not able to be passed through.
         * In this case, a south bridge can be emulated and we have to
         * make an educated guess as to which PCH is really there.
         */
	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| robert beckett | 107 | 95.54% | 1 | 50.00% |
| rodrigo vivi | 5 | 4.46% | 1 | 50.00% |
| Total | 112 | 100.00% | 2 | 100.00% |
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}
	/*
         * The reason to probe ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough work easy for VMM, that only
         * need to expose ISA bridge to let driver know the real hardware
         * underneath. This is a requirement from virtualization team.
         *
         * In some virtualized environments (e.g. XEN), there is an
         * irrelevant ISA bridge in the system. To work reliably, we should
         * scan through all the ISA bridge devices and check for the first
         * match, instead of only checking the first one.
         */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;
			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor == 0x1af4 &&
				    pch->subsystem_device == 0x1100)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;
			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");
	pci_dev_put(pch);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| zhenyu wang | 70 | 15.45% | 1 | 4.55% |
| rodrigo vivi | 61 | 13.47% | 3 | 13.64% |
| satheeshakrishna m | 58 | 12.80% | 1 | 4.55% |
| daniel vetter | 50 | 11.04% | 1 | 4.55% |
| jesse barnes | 46 | 10.15% | 3 | 13.64% |
| ben widawsky | 30 | 6.62% | 2 | 9.09% |
| wei shun chang | 28 | 6.18% | 1 | 4.55% |
| gerd hoffmann | 22 | 4.86% | 2 | 9.09% |
| paulo zanoni | 19 | 4.19% | 2 | 9.09% |
| eugeni dodonov | 19 | 4.19% | 1 | 4.55% |
| robert beckett | 18 | 3.97% | 1 | 4.55% |
| imre deak | 16 | 3.53% | 1 | 4.55% |
| rui guo | 13 | 2.87% | 1 | 4.55% |
| damien lespiau | 2 | 0.44% | 1 | 4.55% |
| jani nikula | 1 | 0.22% | 1 | 4.55% |
| Total | 453 | 100.00% | 22 | 100.00% |
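One subtlety in the loop above: pci_get_class() drops the reference on the device passed in and returns the next match with a fresh reference taken, so the single pci_dev_put() at the end is all the cleanup needed (it is a no-op on NULL). The idiom in isolation, with a hypothetical predicate:
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pdev))) {
		if (is_interesting(pdev))	/* hypothetical predicate */
			break;			/* keep this reference */
	}
	pci_dev_put(pdev);			/* NULL-safe release */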
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;
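	/*
	 * i915.semaphores is a tri-state module parameter: a negative value
	 * (assumed to be the default) means "auto" and falls through to the
	 * heuristics below, while booting with e.g. i915.semaphores=1 forces
	 * the answer unconditionally.
	 */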
	if (i915.semaphores >= 0)
		return i915.semaphores;
	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;
	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;
#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif
	return true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| ben widawsky | 46 | 55.42% | 1 | 16.67% |
| rodrigo vivi | 11 | 13.25% | 1 | 16.67% |
| daniel vetter | 10 | 12.05% | 2 | 33.33% |
| oscar mateo | 10 | 12.05% | 1 | 16.67% |
| jani nikula | 6 | 7.23% | 1 | 16.67% |
| Total | 83 | 100.00% | 6 | 100.00% |
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;
	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 47 | 87.04% | 1 | 50.00% |
| jani nikula | 7 | 12.96% | 1 | 50.00% |
| Total | 54 | 100.00% | 2 | 100.00% |
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 32 | 100.00% | 1 | 100.00% |
| Total | 32 | 100.00% | 1 | 100.00% |
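suspend_to_idle() distinguishes suspend-to-idle (the "freeze" sleep state, where the ACPI target stays below S3) from a full S3 suspend; i915_drm_suspend() below uses the answer to pick PCI_D1 over PCI_D3cold for the opregion notification, and i915_drm_suspend_late() to decide whether the DMC firmware can keep the hardware context alive. Roughly, assuming ACPI's usual sleep-state numbering:
	/*
	 * Why "< ACPI_STATE_S3" identifies suspend-to-idle:
	 * echo freeze > /sys/power/state  ->  target is ACPI_STATE_S0 (0)
	 * echo mem    > /sys/power/state  ->  target is ACPI_STATE_S3 (3)
	 */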
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;
	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);
	disable_rpm_wakeref_asserts(dev_priv);
	/* We do a lot of poking in a lot of registers, so make sure they
         * work properly. */
	intel_display_set_init_power(dev_priv, true);
	drm_kms_helper_poll_disable(dev);
	pci_save_state(dev->pdev);
	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}
	intel_guc_suspend(dev);
	intel_suspend_gt_powersave(dev);
	intel_display_suspend(dev);
	intel_dp_mst_suspend(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);
	intel_suspend_encoders(dev_priv);
	intel_suspend_hw(dev);
	i915_gem_suspend_gtt_mappings(dev);
	i915_save_state(dev);
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev, opregion_target_state);
	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
	dev_priv->suspend_count++;
	intel_display_set_init_power(dev_priv, false);
	intel_csr_ucode_suspend(dev_priv);
out:
	enable_rpm_wakeref_asserts(dev_priv);
	return error;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rafael j. wysocki | 60 | 27.52% | 2 | 5.26% |
| imre deak | 52 | 23.85% | 12 | 31.58% |
| rui zhang | 23 | 10.55% | 1 | 2.63% |
| dave airlie | 20 | 9.17% | 3 | 7.89% |
| jesse barnes | 17 | 7.80% | 5 | 13.16% |
| daniel vetter | 8 | 3.67% | 4 | 10.53% |
| chris wilson | 7 | 3.21% | 3 | 7.89% |
| kristen carlson accardi | 7 | 3.21% | 1 | 2.63% |
| paulo zanoni | 6 | 2.75% | 2 | 5.26% |
| mika kuoppala | 5 | 2.29% | 1 | 2.63% |
| alex dai | 5 | 2.29% | 1 | 2.63% |
| ben widawsky | 5 | 2.29% | 1 | 2.63% |
| maarten lankhorst | 2 | 0.92% | 1 | 2.63% |
| damien lespiau | 1 | 0.46% | 1 | 2.63% |
| Total | 218 | 100.00% | 38 | 100.00% |
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	bool fw_csr;
	int ret;
	disable_rpm_wakeref_asserts(dev_priv);
	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
         * In case of firmware assisted context save/restore don't manually
         * deinit the power domains. This also means the CSR/DMC firmware will
         * stay active, it will power down any HW resources as required and
         * also enable deeper system power states that would be blocked if the
         * firmware was inactive.
         */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);
	ret = 0;
	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);
		goto out;
	}
	pci_disable_device(drm_dev->pdev);
	/*
         * During hibernation on some platforms the BIOS may try to access
         * the device even though it's already in D3 and hang the machine. So
         * leave the device in D0 on those platforms and hope the BIOS will
         * power down the device properly. The issue was seen on multiple old
         * GENs with different BIOS vendors, so having an explicit blacklist
         * is impractical; apply the workaround on everything pre GEN6. The
         * platforms where the issue was seen:
         * Lenovo Thinkpad X301, X61s, X60, T60, X41
         * Fujitsu FSC S7110
         * Acer Aspire 1830T
         */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
out:
	enable_rpm_wakeref_asserts(dev_priv);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 198 | 100.00% | 8 | 100.00% |
| Total | 198 | 100.00% | 8 | 100.00% |
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;
	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}
	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	error = i915_drm_suspend(dev);
	if (error)
		return error;
	return i915_drm_suspend_late(dev, false);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jesse barnes | 42 | 42.86% | 4 | 30.77% |
| rafael j. wysocki | 24 | 24.49% | 1 | 7.69% |
| imre deak | 18 | 18.37% | 4 | 30.77% |
| dave airlie | 10 | 10.20% | 1 | 7.69% |
| keith packard | 2 | 2.04% | 1 | 7.69% |
| chris wilson | 1 | 1.02% | 1 | 7.69% |
| maarten lankhorst | 1 | 1.02% | 1 | 7.69% |
| Total | 98 | 100.00% | 13 | 100.00% |
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	disable_rpm_wakeref_asserts(dev_priv);
	ret = i915_ggtt_enable_hw(dev);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");
	intel_csr_ucode_resume(dev_priv);
	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_restore_state(dev);
	intel_opregion_setup(dev);
	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);
	/*
         * Interrupts have to be enabled before any batches are run. If not the
         * GPU will hang. i915_gem_init_hw() will initiate batches to
         * update/restore the context.
         *
         * Modeset enabling in intel_modeset_init_hw() also needs working
         * interrupts.
         */
	intel_runtime_pm_enable_interrupts(dev_priv);
	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);
	intel_guc_resume(dev);
	intel_modeset_init_hw(dev);
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
	intel_dp_mst_resume(dev);
	intel_display_resume(dev);
	/*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
         * bother with the tiny race here where we might lose hotplug
         * notifications.
         */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);
	intel_opregion_init(dev);
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);
	intel_opregion_notify_adapter(dev, PCI_D0);
	drm_kms_helper_poll_enable(dev);
	enable_rpm_wakeref_asserts(dev_priv);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| dave airlie | 41 | 15.83% | 3 | 8.33% |
| imre deak | 39 | 15.06% | 5 | 13.89% |
| jesse barnes | 36 | 13.90% | 5 | 13.89% |
| chris wilson | 33 | 12.74% | 4 | 11.11% |
| paulo zanoni | 22 | 8.49% | 2 | 5.56% |
| rui zhang | 22 | 8.49% | 1 | 2.78% |
| ville syrjala | 19 | 7.34% | 1 | 2.78% |
| daniel vetter | 15 | 5.79% | 7 | 19.44% |
| rafael j. wysocki | 11 | 4.25% | 2 | 5.56% |
| peter antoine | 6 | 2.32% | 1 | 2.78% |
| sean paul | 6 | 2.32% | 1 | 2.78% |
| alex dai | 5 | 1.93% | 1 | 2.78% |
| stephen chandler paul | 2 | 0.77% | 1 | 2.78% |
| peter zijlstra | 1 | 0.39% | 1 | 2.78% |
| damien lespiau | 1 | 0.39% | 1 | 2.78% |
| Total | 259 | 100.00% | 36 | 100.00% |
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	/*
         * We have a resume ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with an early
         * resume hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
	/*
         * Note that we need to set the power state explicitly, since we
         * powered off the device during freeze and the PCI core won't power
         * it back up for us during thaw. Powering off the device during
         * freeze is not a hard requirement though, and during the
         * suspend/resume phases the PCI core makes sure we get here with the
         * device powered on. So in case we change our freeze logic and keep
         * the device powered we can also remove the following set power state
         * call.
         */
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}
	/*
         * Note that pci_enable_device() first enables any parent bridge
         * device and only then sets the power state for this device. The
         * bridge enabling is a nop though, since bridge devices are resumed
         * first. The order of enabling power and enabling the device is
         * imposed by the PCI core as described above, so here we preserve the
         * same order for the freeze/thaw phases.
         *
         * TODO: eventually we should remove pci_disable_device() /
         * pci_enable_device() from suspend/resume. Due to how they
         * depend on the device enable refcount we can't anyway depend on them
         * disabling/enabling the device.
         */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}
	pci_set_master(dev->pdev);
	disable_rpm_wakeref_asserts(dev_priv);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);
	intel_uncore_early_sanitize(dev, true);
	if (IS_BROXTON(dev)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}
	intel_uncore_sanitize(dev);
	if (IS_BROXTON(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);
	enable_rpm_wakeref_asserts(dev_priv);
out:
	dev_priv->suspended_to_idle = false;
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 144 | 66.36% | 11 | 55.00% |
| rafael j. wysocki | 19 | 8.76% | 1 | 5.00% |
| damien lespiau | 18 | 8.29% | 2 | 10.00% |
| paulo zanoni | 17 | 7.83% | 2 | 10.00% |
| jesse barnes | 9 | 4.15% | 1 | 5.00% |
| wayne boyer | 5 | 2.30% | 1 | 5.00% |
| suketu shah | 3 | 1.38% | 1 | 5.00% |
| dave airlie | 2 | 0.92% | 1 | 5.00% |
| Total | 217 | 100.00% | 20 | 100.00% |
int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;
	return i915_drm_resume(dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 43 | 97.73% | 5 | 83.33% |
| maarten lankhorst | 1 | 2.27% | 1 | 16.67% |
| Total | 44 | 100.00% | 6 | 100.00% |
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	unsigned reset_counter;
	int ret;
	intel_reset_gt_powersave(dev);
	mutex_lock(&dev->struct_mutex);
	/* Clear any previous failed attempts at recovery. Time to try again. */
	atomic_andnot(I915_WEDGED, &error->reset_counter);
	/* Clear the reset-in-progress flag and increment the reset epoch. */
	reset_counter = atomic_inc_return(&error->reset_counter);
	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
		ret = -EIO;
		goto error;
	}
	i915_gem_reset(dev);
	ret = intel_gpu_reset(dev, ALL_ENGINES);
	/* Also reset the gpu hangman. */
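	/*
	 * (Assumed interface for this vintage: the stop_rings mask is set
	 * via the i915_ring_stop debugfs file, making hangcheck report a
	 * simulated hang. Clearing it here, and downgrading -ENODEV below,
	 * lets simulated hangs be tested even on hardware without real
	 * reset support.)
	 */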
	if (error->stop_rings != 0) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		error->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}
	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");
	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}
	intel_overlay_reset(dev_priv);
	/* Ok, now get things going again... */
	/*
         * Everything depends on having the GTT running, so we need to start
         * there.  Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
	ret = i915_gem_init_hw(dev);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}
	mutex_unlock(&dev->struct_mutex);
	/*
         * rps/rc6 re-init is necessary to restore state lost after the
         * reset and the re-install of gt irqs. Skip for ironlake per
         * previous concerns that it doesn't respond well to some forms
         * of re-init after reset.
         */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);
	return 0;
error:
	atomic_or(I915_WEDGED, &error->reset_counter);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chris wilson | 162 | 60.67% | 4 | 23.53% |
| ben gamari | 35 | 13.11% | 1 | 5.88% |
| daniel vetter | 29 | 10.86% | 5 | 29.41% |
| jeff mcgee | 15 | 5.62% | 1 | 5.88% |
| ben widawsky | 8 | 3.00% | 1 | 5.88% |
| imre deak | 6 | 2.25% | 1 | 5.88% |
| ville syrjala | 5 | 1.87% | 1 | 5.88% |
| kenneth graunke | 3 | 1.12% | 1 | 5.88% |
| mika kuoppala | 2 | 0.75% | 1 | 5.88% |
| jani nikula | 2 | 0.75% | 1 | 5.88% |
| Total | 267 | 100.00% | 17 | 100.00% |
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;
	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}
	/* Only bind to function 0 of the device. Early generations
         * used function 1 as a placeholder for multi-head. This causes
         * us confusion instead, especially on the systems where both
         * functions have the same PCI-ID!
         */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;
	/*
         * apple-gmux is needed on dual GPU MacBook Pro
         * to probe the panel if we're the inactive GPU.
         */
	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
	    apple_gmux_present() && pdev != vga_default_device() &&
	    !vga_switcheroo_handler_flags())
		return -EPROBE_DEFER;
	return drm_get_pci_dev(pdev, ent, &driver);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chris wilson | 35 | 32.11% | 1 | 10.00% |
| lukas wunner | 29 | 26.61% | 1 | 10.00% |
| ben widawsky | 21 | 19.27% | 1 | 10.00% |
| kenneth graunke | 11 | 10.09% | 1 | 10.00% |
| daniel vetter | 3 | 2.75% | 1 | 10.00% |
| jani nikula | 3 | 2.75% | 1 | 10.00% |
| ben gamari | 3 | 2.75% | 1 | 10.00% |
| keith packard | 2 | 1.83% | 2 | 20.00% |
| eric anholt | 2 | 1.83% | 1 | 10.00% |
| Total | 109 | 100.00% | 10 | 100.00% |
static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	drm_put_dev(dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chris wilson | 14 | 53.85% | 1 | 33.33% |
| daniel vetter | 8 | 30.77% | 1 | 33.33% |
| kenneth graunke | 4 | 15.38% | 1 | 33.33% |
| Total | 26 | 100.00% | 3 | 100.00% |
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	return i915_drm_suspend(drm_dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| chris wilson | 42 | 59.15% | 4 | 40.00% |
| eric anholt | 16 | 22.54% | 1 | 10.00% |
| keith packard | 8 | 11.27% | 2 | 20.00% |
| imre deak | 4 | 5.63% | 2 | 20.00% |
| ben gamari | 1 | 1.41% | 1 | 10.00% |
| Total | 71 | 100.00% | 10 | 100.00% |
static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
	/*
         * We have a suspend ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with a late
         * suspend hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	return i915_drm_suspend_late(drm_dev, false);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 36 | 83.72% | 4 | 44.44% |
| chris wilson | 2 | 4.65% | 1 | 11.11% |
| kenneth graunke | 2 | 4.65% | 1 | 11.11% |
| keith packard | 1 | 2.33% | 1 | 11.11% |
| damien lespiau | 1 | 2.33% | 1 | 11.11% |
| ben gamari | 1 | 2.33% | 1 | 11.11% |
| Total | 43 | 100.00% | 9 | 100.00% |
static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	return i915_drm_suspend_late(drm_dev, true);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 39 | 92.86% | 2 | 40.00% |
| eric anholt | 1 | 2.38% | 1 | 20.00% |
| paulo zanoni | 1 | 2.38% | 1 | 20.00% |
| keith packard | 1 | 2.38% | 1 | 20.00% |
| Total | 42 | 100.00% | 5 | 100.00% |
static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	return i915_drm_resume_early(drm_dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 21 | 52.50% | 4 | 80.00% |
| suketu shah | 19 | 47.50% | 1 | 20.00% |
| Total | 40 | 100.00% | 5 | 100.00% |
static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	return i915_drm_resume(drm_dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| suketu shah | 19 | 47.50% | 1 | 14.29% |
| imre deak | 11 | 27.50% | 3 | 42.86% |
| chris wilson | 7 | 17.50% | 2 | 28.57% |
| daniel vetter | 3 | 7.50% | 1 | 14.29% |
| Total | 40 | 100.00% | 7 | 100.00% |
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend on
 *                    keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;
	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
	/*
         * Not saving any of:
         * DFT,         0x9800-0x9EC0
         * SARB,        0xB000-0xB1FC
         * GAC,         0x5208-0x524C, 0x14000-0x14C000
         * PCI CFG
         */
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 479 | 96.96% | 2 | 50.00% |
| jesse barnes | 9 | 1.82% | 1 | 25.00% |
| ville syrjala | 6 | 1.21% | 1 | 25.00% |
| Total | 494 | 100.00% | 4 | 100.00% |
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;
	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,	s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,			s->tilectl);
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
	/*
         * Preserve the GT allow wake and GFX force clock bit, they are not
         * restored, as they are used to control the s0ix suspend/resume
         * sequence by the caller.
         */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
	I915_WRITE(VLV_PCBR,			s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 524 | 97.22% | 2 | 50.00% |
| jesse barnes | 9 | 1.67% | 1 | 25.00% |
| ville syrjala | 6 | 1.11% | 1 | 25.00% |
| Total | 539 | 100.00% | 4 | 100.00% |
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
	if (!force_on)
		return 0;
	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
	return err;
#undef COND
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 87 | 100.00% | 1 | 100.00% |
| Total | 87 | 100.00% | 1 | 100.00% |
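wait_for(COND, ms) is i915's register-polling helper from intel_drv.h. A minimal sketch of the pattern it implements (the real macro also re-checks the condition once after the deadline and tunes its sleep interval; this simplified version is an assumption, not the driver's code):
	static int example_wait_for(bool (*cond)(struct drm_i915_private *),
				    struct drm_i915_private *dev_priv,
				    unsigned int timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

		while (!cond(dev_priv)) {
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;	/* callers log and continue */
			usleep_range(500, 1000);	/* don't hammer the register */
		}
		return 0;
	}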
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);
#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
              allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 82 | 100.00% | 1 | 100.00% |
| Total | 82 | 100.00% | 1 | 100.00% |
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;
	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;
	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));
	/*
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));
	return err;
#undef COND
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 87 | 93.55% | 1 | 50.00% |
| jani nikula | 6 | 6.45% | 1 | 50.00% |
| Total | 93 | 100.00% | 2 | 100.00% |
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;
	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 35 | 97.22% | 1 | 50.00% |
| daniel vetter | 1 | 2.78% | 1 | 50.00% |
| Total | 36 | 100.00% | 2 | 100.00% |
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;
	/*
         * Bspec defines the following GT well-on flags as debug only, so
         * don't treat them as hard failures.
         */
	(void)vlv_wait_for_gt_wells(dev_priv, false);
	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
	vlv_check_no_gt_access(dev_priv);
	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;
	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);
	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;
	return 0;
err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 130 | 93.53% | 2 | 50.00% |
| deepak s | 8 | 5.76% | 1 | 25.00% |
| sagar arun kamble | 1 | 0.72% | 1 | 25.00% |
| Total | 139 | 100.00% | 4 | 100.00% |
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;
	/*
         * If any of the steps fail just try to continue, that's the best we
         * can do at this point. Return the first error code (which will also
         * leave RPM permanently disabled).
         */
	ret = vlv_force_gfx_clock(dev_priv, true);
	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);
	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;
	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;
	vlv_check_no_gt_access(dev_priv);
	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 94 | 83.93% | 2 | 40.00% |
| sagar arun kamble | 10 | 8.93% | 2 | 40.00% |
| deepak s | 8 | 7.14% | 1 | 20.00% |
| Total | 112 | 100.00% | 5 | 100.00% |
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;
	DRM_DEBUG_KMS("Suspending device\n");
	/*
         * We could deadlock here in case another thread holding struct_mutex
         * calls RPM suspend concurrently, since the RPM suspend will wait
         * first for this RPM suspend to finish. In this case the concurrent
         * RPM resume will be followed by its RPM suspend counterpart. Still
         * for consistency return -EAGAIN, which will reschedule this suspend.
         */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
		/*
                 * Bump the expiration timestamp, otherwise the suspend won't
                 * be rescheduled.
                 */
		pm_runtime_mark_last_busy(device);
		return -EAGAIN;
	}
	disable_rpm_wakeref_asserts(dev_priv);
	/*
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	intel_guc_suspend(dev);
	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);
	ret = 0;
	if (IS_BROXTON(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);
		enable_rpm_wakeref_asserts(dev_priv);
		return ret;
	}
	intel_uncore_forcewake_reset(dev, false);
	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");
	dev_priv->pm.suspended = true;
	/*
         * FIXME: We really should find a document that references the arguments
         * used below!
         */
	if (IS_BROADWELL(dev)) {
		/*
                 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
                 * being detected, and the call we do at intel_runtime_resume()
                 * won't be able to restore them. Since PCI_D3hot matches the
                 * actual specification and appears to be working, use it.
                 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
                 * Current versions of firmware which depend on this opregion
                 * notification have repurposed the D1 definition to mean
                 * "runtime suspended" (rather than D3, as one would normally
                 * expect) to distinguish it from notifications that might be
                 * sent via the suspend path.
                 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}
	assert_forcewakes_inactive(dev_priv);
	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
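For context, intel_runtime_suspend() is only entered by the runtime-PM core once the device's usage count drops to zero and the autosuspend delay expires. On the consumer side, a driver path brackets hardware access roughly as in the hedged sketch below, which uses the stock linux/pm_runtime.h API; my_hw_touch() is a hypothetical placeholder for real register access, not an i915 function.

#include <linux/pm_runtime.h>

static void my_hw_touch(void) { /* hypothetical hardware access */ }

static int my_driver_do_work(struct device *dev)
{
	int ret;

	/* Wake the device; resumes it synchronously if runtime-suspended. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	my_hw_touch();

	/* Refresh the autosuspend timer, then drop the reference; the core
	 * calls the driver's runtime_suspend once the delay elapses with
	 * the usage count at zero.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}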
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 198 | 58.75% | 11 | 44.00% |
| paulo zanoni | 99 | 29.38% | 7 | 28.00% |
| mika kuoppala | 13 | 3.86% | 3 | 12.00% |
| joonas lahtinen | 10 | 2.97% | 1 | 4.00% |
| chris wilson | 9 | 2.67% | 1 | 4.00% |
| alex dai | 5 | 1.48% | 1 | 4.00% |
| daniel vetter | 3 | 0.89% | 1 | 4.00% |
| Total | 337 | 100.00% | 25 | 100.00% |
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;
	DRM_DEBUG_KMS("Resuming device\n");
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);
	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
	intel_guc_resume(dev);
	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);
	if (IS_BROXTON(dev)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}
	/*
         * No point in rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);
	intel_runtime_pm_enable_interrupts(dev_priv);
	/*
         * On VLV/CHV display interrupts are part of the display
         * power well, so hpd is reinitialized from there. For
         * everyone else do it here.
         */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);
	intel_enable_gt_powersave(dev);
	enable_rpm_wakeref_asserts(dev_priv);
	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| paulo zanoni | 115 | 40.93% | 6 | 31.58% |
| imre deak | 69 | 24.56% | 5 | 26.32% |
| sagar arun kamble | 38 | 13.52% | 2 | 10.53% |
| suketu shah | 15 | 5.34% | 1 | 5.26% |
| ville syrjala | 14 | 4.98% | 1 | 5.26% |
| mika kuoppala | 12 | 4.27% | 1 | 5.26% |
| wayne boyer | 11 | 3.91% | 1 | 5.26% |
| alex dai | 5 | 1.78% | 1 | 5.26% |
| daniel vetter | 2 | 0.71% | 1 | 5.26% |
| Total | 281 | 100.00% | 19 | 100.00% |
static const struct dev_pm_ops i915_pm_ops = {
	/*
         * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
         * PMSG_RESUME]
         */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	/*
         * S4 event handlers
         * @freeze, @freeze_late    : called (1) before creating the
         *                            hibernation image [PMSG_FREEZE] and
         *                            (2) after rebooting, before restoring
         *                            the image [PMSG_QUIESCE]
         * @thaw, @thaw_early       : called (1) after creating the hibernation
         *                            image, before writing it [PMSG_THAW]
         *                            and (2) after failing to create or
         *                            restore the image [PMSG_RECOVER]
         * @poweroff, @poweroff_late: called after writing the hibernation
         *                            image, before rebooting [PMSG_HIBERNATE]
         * @restore, @restore_early : called after rebooting and restoring the
         *                            hibernation image [PMSG_RESTORE]
         */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
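i915 spells out every system-sleep callback individually because its handlers do not all coincide (note that poweroff_late is i915_pm_poweroff_late rather than i915_pm_suspend_late). A driver whose S3 and S4 paths share the same handlers can collapse such a table with the helper macros from linux/pm.h. A minimal sketch, with all my_*() handlers hypothetical:

#include <linux/pm.h>

/* Hypothetical handlers; each returns 0 for brevity. */
static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev) { return 0; }
static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	/* Expands (under CONFIG_PM_SLEEP) to .suspend/.freeze/.poweroff =
	 * my_suspend and .resume/.thaw/.restore = my_resume.
	 */
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};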
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
         */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
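A note on the registration boilerplate: for a pci_driver whose module init and exit do nothing but register and unregister, the module_pci_driver() helper generates the equivalent of the explicit i915_init()/i915_exit() pair shown below. i915 cannot use the shortcut, both because registration goes through drm_pci_init() rather than pci_register_driver() and because loading is conditional on the KMS checks in i915_init(). A sketch for an ordinary driver, with my_pci_driver hypothetical:

/* Generates a module_init/module_exit pair that simply calls
 * pci_register_driver() and pci_unregister_driver().
 */
module_pci_driver(my_pci_driver);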
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;
	/*
         * Enable KMS by default, unless explicitly overridden by
         * either the i915.modeset parameter or by the
         * vga_text_mode_force boot option.
         */
	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}
	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;
	return drm_pci_init(&driver, &i915_pci_driver);
}
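The modeset checks above follow the usual tri-state module-parameter convention: -1 means auto-detect, 0 forces the feature off, 1 forces it on (i915's actual parameter declarations live in i915_params.c). A hedged sketch of declaring such a knob, with enable_feature purely hypothetical:

#include <linux/module.h>

/* Hypothetical tri-state knob: -1 = auto (default), 0 = off, 1 = on. */
static int enable_feature __read_mostly = -1;
module_param(enable_feature, int, 0400);
MODULE_PARM_DESC(enable_feature,
		 "Enable the feature (-1 = auto [default], 0 = off, 1 = on)");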
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jesse barnes | 19 | 20.21% | 1 | 7.69% |
| dave airlie | 18 | 19.15% | 4 | 30.77% |
| ben widawsky | 15 | 15.96% | 1 | 7.69% |
| chris wilson | 13 | 13.83% | 2 | 15.38% |
| jani nikula | 11 | 11.70% | 2 | 15.38% |
| matt roper | 10 | 10.64% | 1 | 7.69% |
| daniel vetter | 6 | 6.38% | 1 | 7.69% |
| maarten lankhorst | 2 | 2.13% | 1 | 7.69% |
| Total | 94 | 100.00% | 13 | 100.00% |
static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
	drm_pci_exit(&driver, &i915_pci_driver);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| dave airlie | 16 | 51.61% | 3 | 60.00% |
| daniel vetter | 13 | 41.94% | 1 | 20.00% |
| ben widawsky | 2 | 6.45% | 1 | 20.00% |
| Total | 31 | 100.00% | 5 | 100.00% |
module_init(i915_init);
module_exit(i915_exit);
MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| imre deak | 2532 | 36.90% | 45 | 16.42% |
| chris wilson | 493 | 7.19% | 22 | 8.03% |
| kristian hogsberg | 413 | 6.02% | 5 | 1.82% |
| ben widawsky | 387 | 5.64% | 13 | 4.74% |
| paulo zanoni | 295 | 4.30% | 18 | 6.57% |
| jesse barnes | 283 | 4.12% | 21 | 7.66% |
| jani nikula | 240 | 3.50% | 7 | 2.55% |
| dave airlie | 224 | 3.26% | 15 | 5.47% |
| daniel vetter | 179 | 2.61% | 25 | 9.12% |
| ville syrjala | 170 | 2.48% | 11 | 4.01% |
| damien lespiau | 142 | 2.07% | 10 | 3.65% |
| rodrigo vivi | 131 | 1.91% | 5 | 1.82% |
| robert beckett | 125 | 1.82% | 1 | 0.36% |
| rafael j. wysocki | 118 | 1.72% | 2 | 0.73% |
| zhenyu wang | 113 | 1.65% | 2 | 0.73% |
| eric anholt | 85 | 1.24% | 5 | 1.82% |
| satheeshakrishna m | 63 | 0.92% | 2 | 0.73% |
| sagar arun kamble | 61 | 0.89% | 2 | 0.73% |
| yakui zhao | 59 | 0.86% | 3 | 1.09% |
| suketu shah | 58 | 0.85% | 2 | 0.73% |
| arjan van de ven | 57 | 0.83% | 1 | 0.36% |
| eugeni dodonov | 56 | 0.82% | 3 | 1.09% |
| ben gamari | 52 | 0.76% | 3 | 1.09% |
| rui zhang | 45 | 0.66% | 1 | 0.36% |
| wayne boyer | 42 | 0.61% | 2 | 0.73% |
| antti koskipaa | 42 | 0.61% | 1 | 0.36% |
| deepak s | 40 | 0.58% | 3 | 1.09% |
| lukas wunner | 38 | 0.55% | 1 | 0.36% |
| mika kuoppala | 38 | 0.55% | 6 | 2.19% |
| wei shun chang | 28 | 0.41% | 1 | 0.36% |
| lionel landwerlin | 27 | 0.39% | 2 | 0.73% |
| kenneth graunke | 23 | 0.34% | 2 | 0.73% |
| gerd hoffmann | 22 | 0.32% | 2 | 0.73% |
| alex dai | 20 | 0.29% | 1 | 0.36% |
| tobias klauser | 18 | 0.26% | 1 | 0.36% |
| jeff mcgee | 15 | 0.22% | 1 | 0.36% |
| keith packard | 14 | 0.20% | 3 | 1.09% |
| rui guo | 13 | 0.19% | 1 | 0.36% |
| matt roper | 10 | 0.15% | 1 | 0.36% |
| oscar mateo | 10 | 0.15% | 1 | 0.36% |
| joonas lahtinen | 10 | 0.15% | 1 | 0.36% |
| maarten lankhorst | 8 | 0.12% | 3 | 1.09% |
| kristen carlson accardi | 7 | 0.10% | 1 | 0.36% |
| peter antoine | 6 | 0.09% | 1 | 0.36% |
| sean paul | 6 | 0.09% | 1 | 0.36% |
| rafael barbalho | 6 | 0.09% | 1 | 0.36% |
| adam jackson | 5 | 0.07% | 1 | 0.36% |
| david herrmann | 5 | 0.07% | 1 | 0.36% |
| yuanhan liu | 5 | 0.07% | 1 | 0.36% |
| daisy sun | 5 | 0.07% | 1 | 0.36% |
| haihao xiang | 3 | 0.04% | 1 | 0.36% |
| david howells | 3 | 0.04% | 1 | 0.36% |
| paul gortmaker | 3 | 0.04% | 1 | 0.36% |
| alex shi | 2 | 0.03% | 1 | 0.36% |
| stephen chandler paul | 2 | 0.03% | 1 | 0.36% |
| peter zijlstra | 1 | 0.01% | 1 | 0.36% |
| michael witten | 1 | 0.01% | 1 | 0.36% |
| li peng | 1 | 0.01% | 1 | 0.36% |
| laurent pinchart | 1 | 0.01% | 1 | 0.36% |
| Total | 6861 | 100.00% | 274 | 100.00% |
  