Contributors: 18

Author               Tokens  Token Proportion  Commits  Commit Proportion
Ben Skeggs              276            51.30%       18             40.91%
Dave Airlie              91            16.91%        7             15.91%
Thomas Zimmermann        33             6.13%        1              2.27%
Takashi Iwai             25             4.65%        1              2.27%
Lukas Wunner             23             4.28%        1              2.27%
Ilia Mirkin              18             3.35%        2              4.55%
Alex Deucher             13             2.42%        1              2.27%
Alexandre Courbot        12             2.23%        1              2.27%
Jérôme Glisse            12             2.23%        1              2.27%
Christoph Hellwig         8             1.49%        2              4.55%
Marcin Kościelnicki       5             0.93%        1              2.27%
Daniel Vetter             5             0.93%        2              4.55%
Chris Wilson              4             0.74%        1              2.27%
Mikko Perttunen           4             0.74%        1              2.27%
Jesse Barnes              3             0.56%        1              2.27%
Peter Lekensteyn          3             0.56%        1              2.27%
Joe Perches               2             0.37%        1              2.27%
Noralf Trønnes            1             0.19%        1              2.27%
Total                   538                          44


// SPDX-License-Identifier: MIT
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_fb_helper.h>

#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#include "nouveau_vga.h"

/*
 * Decode callback registered with the VGA arbiter (see nouveau_vga_init()
 * below): toggle the chipset-specific register that enables legacy VGA
 * decoding on this GPU, then report which VGA resources the device decodes
 * in the new state.
 */
static unsigned int
nouveau_vga_set_decode(struct pci_dev *pdev, bool state)
{
	struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev));
	struct nvif_object *device = &drm->client.device.object;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
	    drm->client.device.info.chipset >= 0x4c)
		nvif_wr32(device, 0x088060, state);
	else if (drm->client.device.info.chipset >= 0x40)
		nvif_wr32(device, 0x088054, state);
	else
		nvif_wr32(device, 0x001854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/*
 * vga_switcheroo power-state callback: resume or suspend the whole device
 * around a GPU switch.  On Optimus/_DSM systems the "off" transition is
 * handled by runtime PM instead, so it is skipped here.
 */
static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_err("VGA switcheroo: switched nouveau on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_pmops_resume(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("VGA switcheroo: switched nouveau off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_switcheroo_optimus_dsm();
		nouveau_pmops_suspend(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/* After a switch, re-probe outputs so the fbdev helper picks up the
 * connectors now driven by this GPU. */
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	drm_fb_helper_output_poll_changed(dev);
}

static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops
nouveau_switcheroo_ops = {
	.set_gpu_state = nouveau_switcheroo_set_state,
	.reprobe = nouveau_switcheroo_reprobe,
	.can_switch = nouveau_switcheroo_can_switch,
};

void
nouveau_vga_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();
	struct pci_dev *pdev;

	/* only relevant for PCI devices */
	if (!dev_is_pci(dev->dev))
		return;
	pdev = to_pci_dev(dev->dev);

	vga_client_register(pdev, nouveau_vga_set_decode);

	/* don't register Thunderbolt eGPU with vga_switcheroo */
	if (pci_is_thunderbolt_attached(pdev))
		return;

	vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);

	/* on pre-Optimus (_DSM v1) hybrid laptops, route runtime PM through
	 * the vga_switcheroo power domain so the dGPU can be powered down */
	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}

void
nouveau_vga_fini(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();
	struct pci_dev *pdev;

	/* only relevant for PCI devices */
	if (!dev_is_pci(dev->dev))
		return;
	pdev = to_pci_dev(dev->dev);

	vga_client_unregister(pdev);

	if (pci_is_thunderbolt_attached(pdev))
		return;

	vga_switcheroo_unregister_client(pdev);
	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}


void
nouveau_vga_lastclose(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}
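

For context, here is a minimal sketch of how these entry points are typically wired into the rest of the driver. The call-site names below (nouveau_drm_device_init(), nouveau_drm_device_fini() and nouveau_driver_lastclose(), nominally in nouveau_drm.c) are assumptions about the surrounding driver code, not part of this file, and may differ between kernel versions.

/* Hypothetical call sites, for illustration only -- the real wiring lives
 * in nouveau_drm.c and may differ between kernel versions. */

static int
nouveau_drm_device_init(struct drm_device *dev)		/* assumed name */
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	/* ... core device/engine setup ... */
	nouveau_vga_init(drm);	/* VGA arbiter + vga_switcheroo registration */
	return 0;
}

static void
nouveau_drm_device_fini(struct drm_device *dev)		/* assumed name */
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	nouveau_vga_fini(drm);	/* unregister in reverse order */
	/* ... remaining teardown ... */
}

static void
nouveau_driver_lastclose(struct drm_device *dev)	/* assumed name */
{
	/* complete a GPU switch that was deferred while the device was
	 * still open (see nouveau_switcheroo_can_switch() above) */
	nouveau_vga_lastclose(dev);
}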