Contributors: 25
Author Tokens Token Proportion Commits Commit Proportion
Jani Nikula 318 37.15% 10 17.86%
Chris Wilson 197 23.01% 7 12.50%
Jesse Barnes 117 13.67% 4 7.14%
Imre Deak 62 7.24% 8 14.29%
Zhenyu Wang 38 4.44% 1 1.79%
Dave Airlie 29 3.39% 4 7.14%
Matt Roper 15 1.75% 2 3.57%
Sinan Kaya 15 1.75% 1 1.79%
Tvrtko A. Ursulin 8 0.93% 1 1.79%
Wambui Karuga 8 0.93% 1 1.79%
Ville Syrjälä 7 0.82% 1 1.79%
Joonas Lahtinen 6 0.70% 1 1.79%
Rafael J. Wysocki 6 0.70% 2 3.57%
Lucas De Marchi 5 0.58% 1 1.79%
Eric Anholt 4 0.47% 1 1.79%
Thomas Zimmermann 4 0.47% 1 1.79%
Daniel Vetter 4 0.47% 2 3.57%
Keith Packard 3 0.35% 1 1.79%
Paul Gortmaker 2 0.23% 1 1.79%
Paulo Zanoni 2 0.23% 1 1.79%
Alex Dai 2 0.23% 1 1.79%
Sean Paul 1 0.12% 1 1.79%
Peter Antoine 1 0.12% 1 1.79%
David Howells 1 0.12% 1 1.79%
Michel Dänzer 1 0.12% 1 1.79%
Total 856 100.00% 56 100.00%


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/pci.h>
#include <linux/pnp.h>

#include <drm/drm_managed.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_gmch.h"
#include "intel_pci_config.h"

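/* drmm release action: drop the reference on the GMCH bridge device */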
static void intel_gmch_bridge_release(struct drm_device *dev, void *bridge)
{
	pci_dev_put(bridge);
}

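/*
 * Look up the GMCH host bridge (device 0, function 0 in the GPU's PCI
 * domain) and hold a reference to it for the lifetime of the drm_device.
 */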
int intel_gmch_bridge_setup(struct drm_i915_private *i915)
{
	int domain = pci_domain_nr(to_pci_dev(i915->drm.dev)->bus);

	i915->gmch.pdev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!i915->gmch.pdev) {
		drm_err(&i915->drm, "bridge device not found\n");
		return -EIO;
	}

	return drmm_add_action_or_reset(&i915->drm, intel_gmch_bridge_release,
					i915->gmch.pdev);
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *i915)
{
	int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

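	/* MCHBAR is a 64-bit BAR on gen4+ and a 32-bit BAR on older platforms */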
	if (GRAPHICS_VER(i915) >= 4)
		pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
	pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	i915->gmch.mch_res.name = "i915 MCHBAR";
	i915->gmch.mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(i915->gmch.pdev->bus,
				     &i915->gmch.mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     i915->gmch.pdev);
	if (ret) {
		drm_dbg(&i915->drm, "failed bus alloc: %d\n", ret);
		i915->gmch.mch_res.start = 0;
		return ret;
	}

	if (GRAPHICS_VER(i915) >= 4)
		pci_write_config_dword(i915->gmch.pdev, reg + 4,
				       upper_32_bits(i915->gmch.mch_res.start));

	pci_write_config_dword(i915->gmch.pdev, reg,
			       lower_32_bits(i915->gmch.mch_res.start));
	return 0;
}

/*
 * Set up MCHBAR if possible; record in i915->gmch.mchbar_need_disable
 * whether we enabled it ourselves so it can be disabled again on teardown.
 */
void intel_gmch_bar_setup(struct drm_i915_private *i915)
{
	int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		return;

	i915->gmch.mchbar_need_disable = false;

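	/*
	 * i915G/GM expose the MCHBAR enable bit in the DEVEN register;
	 * other platforms use the low bit of the MCHBAR register itself.
	 */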
	if (IS_I915G(i915) || IS_I915GM(i915)) {
		pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, there is nothing to do */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(i915))
		return;

	i915->gmch.mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(i915) || IS_I915GM(i915)) {
		pci_write_config_dword(i915->gmch.pdev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
		pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
	}
}

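/*
 * Undo intel_gmch_bar_setup(): clear the MCHBAR enable bit if we set it
 * ourselves and release any resource we allocated for it.
 */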
void intel_gmch_bar_teardown(struct drm_i915_private *i915)
{
	int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (i915->gmch.mchbar_need_disable) {
		if (IS_I915G(i915) || IS_I915GM(i915)) {
			u32 deven_val;

			pci_read_config_dword(i915->gmch.pdev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(i915->gmch.pdev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (i915->gmch.mch_res.start)
		release_resource(&i915->gmch.mch_res);
}

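/*
 * Enable or disable legacy VGA decode on the GMCH by toggling the VGA
 * disable bit in the graphics control register.
 */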
int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
	unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
		drm_err(&i915->drm, "failed to read control word\n");
		return -EIO;
	}

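	/* Nothing to do if VGA decode is already in the requested state */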
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
		return 0;

	if (enable_decode)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(i915->gmch.pdev, reg, gmch_ctrl)) {
		drm_err(&i915->drm, "failed to write control word\n");
		return -EIO;
	}

	return 0;
}