Release 4.9 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
  
  
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include <subdev/timer.h>
void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
	if (pmu && pmu->func->pgob)
		pmu->func->pgob(pmu, enable);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 32 | 94.12% | 4 | 80.00% |
| Alexandre Courbot | 2 | 5.88% | 1 | 20.00% |
| Total | 34 | 100.00% | 5 | 100.00% |
int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	      u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;
	mutex_lock(&subdev->mutex);
	/* wait for a free slot in the fifo */
	addr  = nvkm_rd32(device, 0x10a4a0);
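	/* the 4-bit PUT (0x10a4a0) and GET (0x10a4b0) counters cover the
	 * eight message slots twice, so GET == PUT ^ 8 means the ring is
	 * full (inferred from the masks used below)
	 */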
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&subdev->mutex);
		return -EBUSY;
	}
	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}
	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);
	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}
	mutex_unlock(&subdev->mutex);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 282 | 93.69% | 6 | 85.71% |
| Karol Herbst | 19 | 6.31% | 1 | 14.29% |
| Total | 301 | 100.00% | 7 | 100.00% |
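When a reply buffer is supplied, nvkm_pmu_send() is synchronous: it records the caller's process/message pair, writes the four-word packet through the data port, and sleeps until nvkm_pmu_recv() below posts the matching answer. A minimal caller sketch follows; it is not part of base.c, and PROC_FOO and FOO_MSG_QUERY are placeholders standing in for identifiers a real client shares with the PMU ucode:
/* hypothetical caller sketch, not part of base.c */
static int
foo_query(struct nvkm_pmu *pmu, u32 *value)
{
	u32 reply[2];
	int ret;

	/* blocks until the PMU answers with the same process/message IDs */
	ret = nvkm_pmu_send(pmu, reply, PROC_FOO, FOO_MSG_QUERY, 0, 0);
	if (ret)
		return ret;

	*value = reply[0];
	return 0;
}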
static void
nvkm_pmu_recv(struct work_struct *work)
{
	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;
	/* nothing to do if GET == PUT */
	u32 addr =  nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;
	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0   = nvkm_rd32(device, 0x10a1c4);
	data1   = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);
	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}
	/* right now there are no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >>  0),
		  (char)((process & 0x0000ff00) >>  8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 333 | 100.00% | 5 | 100.00% |
| Total | 333 | 100.00% | 5 | 100.00% |
static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
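	/* bit 5: UAS fault, bit 6: message pending from the PMU,
	 * bit 7: register-write report (roles inferred from the
	 * handlers below)
	 */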
	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}
	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}
	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}
	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 218 | 100.00% | 6 | 100.00% |
| Total | 218 | 100.00% | 6 | 100.00% |
static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
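	/* INTR_EN_CLR (see nvkm_pmu_init): disable the fault and message
	 * interrupts, then drain any receive work still queued
	 */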
	nvkm_wr32(device, 0x10a014, 0x00000060);
	flush_work(&pmu->recv.work);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 57 | 100.00% | 5 | 100.00% |
| Total | 57 | 100.00% | 5 | 100.00% |
static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	int i;
	/* prevent previous ucode from running, wait for idle, reset */
	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
	nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x10a04c))
			break;
	);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
	nvkm_rd32(device, 0x000200);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);
	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
	/* upload code segment */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
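		/* select the next 256-byte code block every 64 words */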
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}
	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);
	/* wait for valid host->pmu ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
	/* wait for valid pmu->host ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 358 | 99.44% | 7 | 87.50% |
| Martin Peres | 2 | 0.56% | 1 | 12.50% |
| Total | 360 | 100.00% | 8 | 100.00% |
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
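	/* no PMU-specific teardown; return the embedding nvkm_pmu so the
	 * subdev core can free it
	 */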
	return nvkm_pmu(subdev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 18 | 100.00% | 2 | 100.00% |
| Total | 18 | 100.00% | 2 | 100.00% |
static const struct nvkm_subdev_func
nvkm_pmu = {
	.dtor = nvkm_pmu_dtor,
	.init = nvkm_pmu_init,
	.fini = nvkm_pmu_fini,
	.intr = nvkm_pmu_intr,
};
int
nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pmu **ppmu)
{
	struct nvkm_pmu *pmu;
	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
	pmu->func = func;
	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
	init_waitqueue_head(&pmu->recv.wait);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 101 | 100.00% | 4 | 100.00% |
| Total | 101 | 100.00% | 4 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
|--------|--------|------|---------|------------|
| Ben Skeggs | 1434 | 98.42% | 10 | 76.92% |
| Karol Herbst | 19 | 1.30% | 1 | 7.69% |
| Alexandre Courbot | 2 | 0.14% | 1 | 7.69% |
| Martin Peres | 2 | 0.14% | 1 | 7.69% |
| Total | 1457 | 100.00% | 13 | 100.00% |