Contributors: 27
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jani Nikula | 356 | 39.16% | 10 | 16.39% |
| Chris Wilson | 203 | 22.33% | 9 | 14.75% |
| Jesse Barnes | 116 | 12.76% | 4 | 6.56% |
| Imre Deak | 61 | 6.71% | 8 | 13.11% |
| Zhenyu Wang | 34 | 3.74% | 1 | 1.64% |
| Dave Airlie | 29 | 3.19% | 4 | 6.56% |
| Matt Roper | 15 | 1.65% | 2 | 3.28% |
| Sinan Kaya | 15 | 1.65% | 1 | 1.64% |
| Tvrtko A. Ursulin | 8 | 0.88% | 1 | 1.64% |
| Wambui Karuga | 8 | 0.88% | 1 | 1.64% |
| Ville Syrjälä | 7 | 0.77% | 1 | 1.64% |
| Christoph Hellwig | 7 | 0.77% | 1 | 1.64% |
| Rafael J. Wysocki | 6 | 0.66% | 2 | 3.28% |
| Joonas Lahtinen | 6 | 0.66% | 1 | 1.64% |
| Lucas De Marchi | 5 | 0.55% | 1 | 1.64% |
| Arnd Bergmann | 5 | 0.55% | 1 | 1.64% |
| Paulo Zanoni | 5 | 0.55% | 2 | 3.28% |
| Eric Anholt | 4 | 0.44% | 1 | 1.64% |
| Thomas Zimmermann | 4 | 0.44% | 1 | 1.64% |
| Daniel Vetter | 4 | 0.44% | 2 | 3.28% |
| Uma Shankar | 3 | 0.33% | 1 | 1.64% |
| Alex Dai | 2 | 0.22% | 1 | 1.64% |
| Paul Gortmaker | 2 | 0.22% | 1 | 1.64% |
| Sean Paul | 1 | 0.11% | 1 | 1.64% |
| David Howells | 1 | 0.11% | 1 | 1.64% |
| Peter Antoine | 1 | 0.11% | 1 | 1.64% |
| Michel Dänzer | 1 | 0.11% | 1 | 1.64% |
| Total | 909 | 100.00% | 61 | 100.00% |
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/vgaarb.h>

#include <drm/drm_managed.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_gmch.h"
#include "intel_pci_config.h"
static void intel_gmch_bridge_release(struct drm_device *dev, void *bridge)
{
	pci_dev_put(bridge);
}

int intel_gmch_bridge_setup(struct drm_i915_private *i915)
{
	int domain = pci_domain_nr(to_pci_dev(i915->drm.dev)->bus);

	i915->gmch.pdev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!i915->gmch.pdev) {
		drm_err(&i915->drm, "bridge device not found\n");
		return -EIO;
	}

	return drmm_add_action_or_reset(&i915->drm, intel_gmch_bridge_release,
					i915->gmch.pdev);
}
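
The bridge lookup always targets device 0, function 0 on the GPU's PCI domain, i.e. the GMCH/host bridge, and drmm_add_action_or_reset() ties the matching pci_dev_put() to the DRM device's lifetime, so no explicit unwind call is needed. A minimal sketch of a probe-time call site (hypothetical; the surrounding probe code is not part of this file):

	/* Hypothetical probe path: grab the host bridge before MCHBAR setup. */
	ret = intel_gmch_bridge_setup(i915);
	if (ret)
		return ret; /* -EIO when the bridge device is absent */
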
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *i915)
{
	int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (GRAPHICS_VER(i915) >= 4)
		pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
	pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
	if (IS_ENABLED(CONFIG_PNP) && mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;

	/* Get some space for it */
	i915->gmch.mch_res.name = "i915 MCHBAR";
	i915->gmch.mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(i915->gmch.pdev->bus,
				     &i915->gmch.mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     i915->gmch.pdev);
	if (ret) {
		drm_dbg(&i915->drm, "failed bus alloc: %d\n", ret);
		i915->gmch.mch_res.start = 0;
		return ret;
	}

	if (GRAPHICS_VER(i915) >= 4)
		pci_write_config_dword(i915->gmch.pdev, reg + 4,
				       upper_32_bits(i915->gmch.mch_res.start));
	pci_write_config_dword(i915->gmch.pdev, reg,
			       lower_32_bits(i915->gmch.mch_res.start));

	return 0;
}
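
On GRAPHICS_VER >= 4 the MCHBAR address is split across two config-space dwords, so the function stitches the 64-bit value together before checking whether firmware (via PNP/ACPI) already reserved the range; only if it did not does it fall back to pci_bus_alloc_resource(). The address assembly in isolation (pdev and reg stand in for the real arguments; a sketch, not code from this file):

	u32 lo, hi = 0;
	u64 addr;

	pci_read_config_dword(pdev, reg, &lo);
	pci_read_config_dword(pdev, reg + 4, &hi); /* upper dword, gen4+ only */
	addr = ((u64)hi << 32) | lo;
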
/* Set up the MCHBAR if possible; flag it for teardown if we had to enable it */
void intel_gmch_bar_setup(struct drm_i915_private *i915)
{
	int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		return;

	i915->gmch.mchbar_need_disable = false;

	if (IS_I915G(i915) || IS_I915GM(i915)) {
		pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(i915))
		return;

	i915->gmch.mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(i915) || IS_I915GM(i915)) {
		pci_write_config_dword(i915->gmch.pdev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
		pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
	}
}
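
Note that mchbar_need_disable is set only when this function itself enabled the BAR, which lets the teardown below restore exactly the firmware's original state. A sketch of the expected pairing, assuming a typical init/remove flow (the call sites are illustrative, not taken from this file):

	intel_gmch_bar_setup(i915);    /* init: enable MCHBAR if firmware did not */
	/* ... driver operates ... */
	intel_gmch_bar_teardown(i915); /* remove: undo only what setup changed */
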
void intel_gmch_bar_teardown(struct drm_i915_private *i915)
{
	int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (i915->gmch.mchbar_need_disable) {
		if (IS_I915G(i915) || IS_I915GM(i915)) {
			u32 deven_val;

			pci_read_config_dword(i915->gmch.pdev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(i915->gmch.pdev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (i915->gmch.mch_res.start)
		release_resource(&i915->gmch.mch_res);
}
int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
	unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
		drm_err(&i915->drm, "failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
		return 0;

	if (enable_decode)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(i915->gmch.pdev, reg, gmch_ctrl)) {
		drm_err(&i915->drm, "failed to write control word\n");
		return -EIO;
	}

	return 0;
}
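
The double-negation early return reads as "the hardware already matches the request": !!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) is 1 when VGA decode is currently disabled, !enable_decode is 1 when the caller wants it disabled, and equality means there is nothing to write back. An equivalent, more verbose spelling of the same check:

	bool currently_disabled = gmch_ctrl & INTEL_GMCH_VGA_DISABLE;
	bool want_disabled = !enable_decode;

	if (currently_disabled == want_disabled)
		return 0; /* already in the requested state */
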
unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
	struct drm_i915_private *i915 = pdev_to_i915(pdev);

	intel_gmch_vga_set_state(i915, enable_decode);

	if (enable_decode)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
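
This function has the set_decode callback shape used by the VGA arbiter: the arbiter invokes it when legacy VGA routing changes, and the return value advertises which legacy/normal I/O and memory ranges the device still decodes. A hedged sketch of how such a callback is typically registered (i915 wires this up elsewhere; this call site is an assumption, not part of this file):

	/* Register with the VGA arbiter so it can toggle legacy VGA decode. */
	ret = vga_client_register(pdev, intel_gmch_vga_set_decode);
	if (ret)
		drm_err(&i915->drm, "failed to register VGA client\n");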