/*
 * Copyright (C) 2019  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_ras.h"

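/**
 * amdgpu_nbio_ras_sw_init - register the NBIO (pcie_bif) RAS block
 * @adev: amdgpu device pointer
 *
 * Registers the NBIO RAS block with the RAS framework and fills in its
 * common fields (name, block id, error type).  Does nothing if the ASIC
 * provides no NBIO RAS block.
 *
 * Returns 0 on success or a negative error code on failure.
 */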
int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_nbio_ras *ras;

	if (!adev->nbio.ras)
		return 0;

	ras = adev->nbio.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register pcie_bif ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "pcie_bif");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->nbio.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

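/**
 * amdgpu_nbio_get_pcie_replay_count - read the PCIe replay counter
 * @adev: amdgpu device pointer
 *
 * Returns the PCIe replay count reported by the ASIC specific NBIO
 * callback, or 0 if the callback is not implemented.
 */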
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
{
	if (adev->nbio.funcs && adev->nbio.funcs->get_pcie_replay_count)
		return adev->nbio.funcs->get_pcie_replay_count(adev);

	return 0;
}

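/**
 * amdgpu_nbio_get_pcie_usage - read the PCIe usage counters
 * @adev: amdgpu device pointer
 * @count0: output for the first PCIe usage counter
 * @count1: output for the second PCIe usage counter
 *
 * Fills @count0 and @count1 via the ASIC specific NBIO callback; the
 * outputs are left untouched if the callback is not implemented.
 */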
void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				uint64_t *count1)
{
	if (adev->nbio.funcs && adev->nbio.funcs->get_pcie_usage)
		adev->nbio.funcs->get_pcie_usage(adev, count0, count1);
}

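/**
 * amdgpu_nbio_ras_late_init - late init for the NBIO RAS block
 * @adev: amdgpu device pointer
 * @ras_block: common RAS data for the NBIO block
 *
 * Performs the common RAS late init and, if RAS is supported for this
 * block, enables the RAS controller and err_event_athub interrupts.
 * On interrupt failure the common late init is rolled back.
 *
 * Returns 0 on success or a negative error code on failure.
 */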
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
		if (r)
			goto late_fini;
		r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
		if (r)
			goto late_fini;
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}