Release 4.18 - drivers/gpu/drm/amd/powerplay/amd_powerplay.c
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
static const struct amd_pm_funcs pp_dpm_funcs;
static int amd_powerplay_create(struct amdgpu_device *adev)
{
        struct pp_hwmgr *hwmgr;

        if (adev == NULL)
                return -EINVAL;

        hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
        if (hwmgr == NULL)
                return -ENOMEM;

        hwmgr->adev = adev;
        hwmgr->not_vf = !amdgpu_sriov_vf(adev);
        hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
        hwmgr->device = amdgpu_cgs_create_device(adev);
        mutex_init(&hwmgr->smu_lock);
        hwmgr->chip_family = adev->family;
        hwmgr->chip_id = adev->asic_type;
        hwmgr->feature_mask = adev->powerplay.pp_feature;
        hwmgr->display_config = &adev->pm.pm_display_cfg;
        adev->powerplay.pp_handle = hwmgr;
        adev->powerplay.pp_funcs = &pp_dpm_funcs;
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 144 | 92.90% | 6 | 75.00% |
Alex Deucher | 6 | 3.87% | 1 | 12.50% |
Huang Rui | 5 | 3.23% | 1 | 12.50% |
Total | 155 | 100.00% | 8 | 100.00% |
static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
kfree(hwmgr->hardcode_pp_table);
hwmgr->hardcode_pp_table = NULL;
kfree(hwmgr);
hwmgr = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 28 | 63.64% | 4 | 66.67% |
Eric Huang | 13 | 29.55% | 1 | 16.67% |
Jammy Zhou | 3 | 6.82% | 1 | 16.67% |
Total | 44 | 100.00% | 6 | 100.00% |
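/*
 * IP-block lifecycle handlers: pp_early_init() through pp_resume() below are
 * the amd_ip_funcs callbacks collected in the pp_ip_funcs table and invoked
 * by the amdgpu IP-block framework.
 */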
static int pp_early_init(void *handle)
{
int ret;
struct amdgpu_device *adev = handle;
ret = amd_powerplay_create(adev);
if (ret != 0)
return ret;
ret = hwmgr_early_init(adev->powerplay.pp_handle);
if (ret)
return -EINVAL;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 56 | 96.55% | 6 | 85.71% |
Jammy Zhou | 2 | 3.45% | 1 | 14.29% |
Total | 58 | 100.00% | 7 | 100.00% |
static int pp_sw_init(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
ret = hwmgr_sw_init(hwmgr);
pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 43 | 79.63% | 5 | 50.00% |
Alex Deucher | 5 | 9.26% | 2 | 20.00% |
Huang Rui | 3 | 5.56% | 1 | 10.00% |
Jammy Zhou | 2 | 3.70% | 1 | 10.00% |
Pixel Ding | 1 | 1.85% | 1 | 10.00% |
Total | 54 | 100.00% | 10 | 100.00% |
static int pp_sw_fini(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
hwmgr_sw_fini(hwmgr);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
amdgpu_ucode_fini_bo(adev);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 53 | 75.71% | 6 | 75.00% |
Alex Deucher | 11 | 15.71% | 1 | 12.50% |
Jammy Zhou | 6 | 8.57% | 1 | 12.50% |
Total | 70 | 100.00% | 8 | 100.00% |
static int pp_hw_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_init_bo(adev);
ret = hwmgr_hw_init(hwmgr);
if (ret)
pr_err("powerplay hw init failed\n");
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 44 | 65.67% | 7 | 63.64% |
Jammy Zhou | 12 | 17.91% | 2 | 18.18% |
Alex Deucher | 10 | 14.93% | 1 | 9.09% |
Huang Rui | 1 | 1.49% | 1 | 9.09% |
Total | 67 | 100.00% | 11 | 100.00% |
static int pp_hw_fini(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
hwmgr_hw_fini(hwmgr);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 18 | 50.00% | 2 | 50.00% |
Alex Deucher | 13 | 36.11% | 1 | 25.00% |
Jammy Zhou | 5 | 13.89% | 1 | 25.00% |
Total | 36 | 100.00% | 4 | 100.00% |
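/*
 * Reserve a GTT buffer of adev->pm.smu_prv_buffer_size bytes and hand its
 * CPU and GPU addresses to the SMU via notify_cac_buffer_info(); on failure
 * the buffer is freed again and the SMU private buffer stays disabled.
 */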
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
        int r = -EINVAL;
        void *cpu_ptr = NULL;
        uint64_t gpu_addr;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->pm.smu_prv_buffer,
                                    &gpu_addr,
                                    &cpu_ptr)) {
                DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
                return;
        }

        if (hwmgr->hwmgr_func->notify_cac_buffer_info)
                r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
                                        lower_32_bits((unsigned long)cpu_ptr),
                                        upper_32_bits((unsigned long)cpu_ptr),
                                        lower_32_bits(gpu_addr),
                                        upper_32_bits(gpu_addr),
                                        adev->pm.smu_prv_buffer_size);

        if (r) {
                amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
                adev->pm.smu_prv_buffer = NULL;
                DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 161 | 100.00% | 1 | 100.00% |
Total | 161 | 100.00% | 1 | 100.00% |
static int pp_late_init(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        if (hwmgr && hwmgr->pm_en) {
                mutex_lock(&hwmgr->smu_lock);
                hwmgr_handle_task(hwmgr,
                                  AMD_PP_TASK_COMPLETE_INIT, NULL);
                mutex_unlock(&hwmgr->smu_lock);
        }
        if (adev->pm.smu_prv_buffer_size != 0)
                pp_reserve_vram_for_smu(adev);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 81 | 100.00% | 5 | 100.00% |
Total | 81 | 100.00% | 5 | 100.00% |
static void pp_late_fini(void *handle)
{
struct amdgpu_device *adev = handle;
if (adev->pm.smu_prv_buffer)
amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
amd_powerplay_destroy(adev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 44 | 100.00% | 3 | 100.00% |
Total | 44 | 100.00% | 3 | 100.00% |
static bool pp_is_idle(void *handle)
{
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Deucher | 12 | 92.31% | 1 | 50.00% |
Edward O'Callaghan | 1 | 7.69% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
static int pp_wait_for_idle(void *handle)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Deucher | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
static int pp_sw_reset(void *handle)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Deucher | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
static int pp_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret;

        if (!hwmgr || !hwmgr->pm_en)
                return 0;

        if (hwmgr->hwmgr_func->gfx_off_control) {
                /* Enable/disable GFX off through SMU */
                ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
                                                         state == AMD_PG_STATE_GATE);
                if (ret)
                        pr_err("gfx off control failed!\n");
        }

        if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
                pr_debug("%s was not implemented.\n", __func__);
                return 0;
        }

        /* Enable/disable GFX per cu powergating through SMU */
        return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
                                                             state == AMD_PG_STATE_GATE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Rui | 39 | 32.50% | 2 | 25.00% |
Eric Huang | 33 | 27.50% | 1 | 12.50% |
Rex Zhu | 32 | 26.67% | 4 | 50.00% |
Alex Deucher | 16 | 13.33% | 1 | 12.50% |
Total | 120 | 100.00% | 8 | 100.00% |
static int pp_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
return hwmgr_suspend(hwmgr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 23 | 67.65% | 4 | 80.00% |
Alex Deucher | 11 | 32.35% | 1 | 20.00% |
Total | 34 | 100.00% | 5 | 100.00% |
static int pp_resume(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
return hwmgr_resume(hwmgr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 23 | 67.65% | 6 | 85.71% |
Alex Deucher | 11 | 32.35% | 1 | 14.29% |
Total | 34 | 100.00% | 7 | 100.00% |
static int pp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrey Grodzovsky | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static const struct amd_ip_funcs pp_ip_funcs = {
.name = "powerplay",
.early_init = pp_early_init,
.late_init = pp_late_init,
.sw_init = pp_sw_init,
.sw_fini = pp_sw_fini,
.hw_init = pp_hw_init,
.hw_fini = pp_hw_fini,
.late_fini = pp_late_fini,
.suspend = pp_suspend,
.resume = pp_resume,
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
};
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &pp_ip_funcs,
};
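For reference, a minimal sketch of how this IP block reaches the framework; the function name below is illustrative only, while amdgpu_device_ip_block_add() is the registration helper used by the per-ASIC setup code:
/* Illustrative sketch, not part of this file. */
static int example_register_powerplay(struct amdgpu_device *adev)
{
        /* Register the SMC IP block so the pp_* callbacks above get called. */
        return amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
}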
static int pp_dpm_load_fw(void *handle)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Deucher | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
static int pp_dpm_fw_loading_complete(void *handle)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Deucher | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 69 | 100.00% | 2 | 100.00% |
Total | 69 | 100.00% | 2 | 100.00% |
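/*
 * UMD pstate handling: when a user-mode profiling level (PROFILE_STANDARD,
 * PROFILE_MIN_SCLK, PROFILE_MIN_MCLK or PROFILE_PEAK) is requested, GFX
 * clock- and powergating are ungated so clocks stay fixed for profiling;
 * leaving the profiling levels restores the saved DPM level and re-enables
 * gating.
 */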
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        if (!(hwmgr->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg*/
                if (*level & profile_mode_mask) {
                        hwmgr->saved_dpm_level = hwmgr->dpm_level;
                        hwmgr->en_umd_pstate = true;
                        amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg*/
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = hwmgr->saved_dpm_level;
                        hwmgr->en_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_GATE);
                }
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 144 | 100.00% | 2 | 100.00% |
Total | 144 | 100.00% | 2 | 100.00% |
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (level == hwmgr->dpm_level)
return 0;
mutex_lock(&hwmgr->smu_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
mutex_unlock(&hwmgr->smu_lock);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 71 | 80.68% | 7 | 87.50% |
Alex Deucher | 17 | 19.32% | 1 | 12.50% |
Total | 88 | 100.00% | 8 | 100.00% |
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
struct pp_hwmgr *hwmgr = handle;
enum amd_dpm_forced_level level;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
level = hwmgr->dpm_level;
mutex_unlock(&hwmgr->smu_lock);
return level;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 49 | 80.33% | 5 | 83.33% |
Alex Deucher | 12 | 19.67% | 1 | 16.67% |
Total | 61 | 100.00% | 6 | 100.00% |
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
if (hwmgr->hwmgr_func->get_sclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
mutex_unlock(&hwmgr->smu_lock);
return clk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 77 | 83.70% | 7 | 77.78% |
Alex Deucher | 14 | 15.22% | 1 | 11.11% |
Huang Rui | 1 | 1.09% | 1 | 11.11% |
Total | 92 | 100.00% | 9 | 100.00% |
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
if (hwmgr->hwmgr_func->get_mclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
mutex_unlock(&hwmgr->smu_lock);
return clk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 91 | 98.91% | 7 | 87.50% |
Huang Rui | 1 | 1.09% | 1 | 12.50% |
Total | 92 | 100.00% | 8 | 100.00% |
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
mutex_unlock(&hwmgr->smu_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 65 | 83.33% | 7 | 77.78% |
Alex Deucher | 12 | 15.38% | 1 | 11.11% |
Huang Rui | 1 | 1.28% | 1 | 11.11% |
Total | 78 | 100.00% | 9 | 100.00% |
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
mutex_unlock(&hwmgr->smu_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 65 | 83.33% | 7 | 77.78% |
Alex Deucher | 12 | 15.38% | 1 | 11.11% |
Huang Rui | 1 | 1.28% | 1 | 11.11% |
Total | 78 | 100.00% | 9 | 100.00% |
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
int ret = 0;
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr_handle_task(hwmgr, task_id, user_state);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 54 | 72.00% | 7 | 63.64% |
Alex Deucher | 14 | 18.67% | 1 | 9.09% |
Evan Quan | 4 | 5.33% | 1 | 9.09% |
Eric Huang | 2 | 2.67% | 1 | 9.09% |
Baoyou Xie | 1 | 1.33% | 1 | 9.09% |
Total | 75 | 100.00% | 11 | 100.00% |
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        struct pp_power_state *state;
        enum amd_pm_state_type pm_type;

        if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        state = hwmgr->current_ps;

        switch (state->classification.ui_label) {
        case PP_StateUILabel_Battery:
                pm_type = POWER_STATE_TYPE_BATTERY;
                break;
        case PP_StateUILabel_Balanced:
                pm_type = POWER_STATE_TYPE_BALANCED;
                break;
        case PP_StateUILabel_Performance:
                pm_type = POWER_STATE_TYPE_PERFORMANCE;
                break;
        default:
                if (state->classification.flags & PP_StateClassificationFlag_Boot)
                        pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
                else
                        pm_type = POWER_STATE_TYPE_DEFAULT;
                break;
        }
        mutex_unlock(&hwmgr->smu_lock);

        return pm_type;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 99 | 78.57% | 4 | 50.00% |
Eric Huang | 12 | 9.52% | 1 | 12.50% |
Alex Deucher | 10 | 7.94% | 1 | 12.50% |
Dan Carpenter | 4 | 3.17% | 1 | 12.50% |
Baoyou Xie | 1 | 0.79% | 1 | 12.50% |
Total | 126 | 100.00% | 8 | 100.00% |
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
mutex_unlock(&hwmgr->smu_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 77 | 98.72% | 7 | 87.50% |
Huang Rui | 1 | 1.28% | 1 | 12.50% |
Total | 78 | 100.00% | 8 | 100.00% |
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t mode = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
mutex_unlock(&hwmgr->smu_lock);
return mode;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 86 | 98.85% | 7 | 87.50% |
Huang Rui | 1 | 1.15% | 1 | 12.50% |
Total | 87 | 100.00% | 8 | 100.00% |
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 92 | 98.92% | 6 | 85.71% |
Huang Rui | 1 | 1.08% | 1 | 14.29% |
Total | 93 | 100.00% | 7 | 100.00% |
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 93 | 98.94% | 6 | 85.71% |
Huang Rui | 1 | 1.06% | 1 | 14.29% |
Total | 94 | 100.00% | 7 | 100.00% |
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Grazvydas Ignotas | 48 | 55.81% | 1 | 20.00% |
Rex Zhu | 38 | 44.19% | 4 | 80.00% |
Total | 86 | 100.00% | 5 | 100.00% |
static int pp_dpm_get_pp_num_states(void *handle,
                                    struct pp_states_info *data)
{
        struct pp_hwmgr *hwmgr = handle;
        int i;

        memset(data, 0, sizeof(*data));

        if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        data->nums = hwmgr->num_ps;

        for (i = 0; i < hwmgr->num_ps; i++) {
                struct pp_power_state *state = (struct pp_power_state *)
                                ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
                switch (state->classification.ui_label) {
                case PP_StateUILabel_Battery:
                        data->states[i] = POWER_STATE_TYPE_BATTERY;
                        break;
                case PP_StateUILabel_Balanced:
                        data->states[i] = POWER_STATE_TYPE_BALANCED;
                        break;
                case PP_StateUILabel_Performance:
                        data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
                        break;
                default:
                        if (state->classification.flags & PP_StateClassificationFlag_Boot)
                                data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
                        else
                                data->states[i] = POWER_STATE_TYPE_DEFAULT;
                }
        }
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 166 | 80.58% | 1 | 20.00% |
Rex Zhu | 27 | 13.11% | 3 | 60.00% |
Evan Quan | 13 | 6.31% | 1 | 20.00% |
Total | 206 | 100.00% | 5 | 100.00% |
static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
int size = 0;
if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
*table = (char *)hwmgr->soft_pp_table;
size = hwmgr->soft_pp_table_size;
mutex_unlock(&hwmgr->smu_lock);
return size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 45 | 54.88% | 2 | 33.33% |
Rex Zhu | 37 | 45.12% | 4 | 66.67% |
Total | 82 | 100.00% | 6 | 100.00% |
static int amd_powerplay_reset(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
int ret;
ret = hwmgr_hw_fini(hwmgr);
if (ret)
return ret;
ret = hwmgr_hw_init(hwmgr);
if (ret)
return ret;
return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 58 | 100.00% | 3 | 100.00% |
Total | 58 | 100.00% | 3 | 100.00% |
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = -ENOMEM;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        if (!hwmgr->hardcode_pp_table) {
                hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
                                                   hwmgr->soft_pp_table_size,
                                                   GFP_KERNEL);
                if (!hwmgr->hardcode_pp_table)
                        goto err;
        }

        memcpy(hwmgr->hardcode_pp_table, buf, size);

        hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

        ret = amd_powerplay_reset(handle);
        if (ret)
                goto err;

        if (hwmgr->hwmgr_func->avfs_control) {
                ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
                if (ret)
                        goto err;
        }
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
err:
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 107 | 60.80% | 3 | 30.00% |
Rex Zhu | 64 | 36.36% | 6 | 60.00% |
Edward O'Callaghan | 5 | 2.84% | 1 | 10.00% |
Total | 176 | 100.00% | 10 | 100.00% |
static int pp_dpm_force_clock_level(void *handle,
                                    enum pp_clock_type type, uint32_t mask)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->force_clock_level == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        mutex_lock(&hwmgr->smu_lock);
        if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
                ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
        else
                ret = -EINVAL;
        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 64 | 56.64% | 6 | 66.67% |
Eric Huang | 48 | 42.48% | 2 | 22.22% |
Huang Rui | 1 | 0.88% | 1 | 11.11% |
Total | 113 | 100.00% | 9 | 100.00% |
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 52 | 52.00% | 1 | 14.29% |
Rex Zhu | 47 | 47.00% | 5 | 71.43% |
Huang Rui | 1 | 1.00% | 1 | 14.29% |
Total | 100 | 100.00% | 7 | 100.00% |
static int pp_dpm_get_sclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 49 | 55.68% | 1 | 16.67% |
Rex Zhu | 38 | 43.18% | 4 | 66.67% |
Huang Rui | 1 | 1.14% | 1 | 16.67% |
Total | 88 | 100.00% | 6 | 100.00% |
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 54 | 58.06% | 1 | 14.29% |
Rex Zhu | 37 | 39.78% | 4 | 57.14% |
Huang Rui | 1 | 1.08% | 1 | 14.29% |
Alex Deucher | 1 | 1.08% | 1 | 14.29% |
Total | 93 | 100.00% | 7 | 100.00% |
static int pp_dpm_get_mclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 49 | 55.68% | 1 | 16.67% |
Rex Zhu | 38 | 43.18% | 4 | 66.67% |
Huang Rui | 1 | 1.14% | 1 | 16.67% |
Total | 88 | 100.00% | 6 | 100.00% |
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 54 | 58.06% | 1 | 16.67% |
Rex Zhu | 38 | 40.86% | 4 | 66.67% |
Huang Rui | 1 | 1.08% | 1 | 16.67% |
Total | 93 | 100.00% | 6 | 100.00% |
static int pp_dpm_read_sensor(void *handle, int idx,
                              void *value, int *size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !value)
                return -EINVAL;

        switch (idx) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)value) = hwmgr->pstate_sclk;
                return 0;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)value) = hwmgr->pstate_mclk;
                return 0;
        default:
                mutex_lock(&hwmgr->smu_lock);
                ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
                mutex_unlock(&hwmgr->smu_lock);
                return ret;
        }
}
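As an illustration of the sensor path, a minimal sketch follows; the helper name is hypothetical, and it assumes the common AMDGPU_PP_SENSOR_GPU_TEMP index from kgd_pp_interface.h:
/* Illustrative sketch: read the GPU temperature through the sensor path. */
static int example_read_gpu_temp(struct pp_hwmgr *hwmgr, uint32_t *temp)
{
        int size = sizeof(*temp);

        return pp_dpm_read_sensor(hwmgr, AMDGPU_PP_SENSOR_GPU_TEMP, temp, &size);
}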
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 83 | 63.36% | 5 | 62.50% |
Tom St Denis | 47 | 35.88% | 2 | 25.00% |
Eric Huang | 1 | 0.76% | 1 | 12.50% |
Total | 131 | 100.00% | 8 | 100.00% |
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return NULL;
if (idx < hwmgr->num_vce_state_tables)
return &hwmgr->vce_states[idx];
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Deucher | 40 | 72.73% | 1 | 33.33% |
Rex Zhu | 15 | 27.27% | 2 | 66.67% |
Total | 55 | 100.00% | 3 | 100.00% |
static int pp_get_power_profile_mode(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !buf)
return -EINVAL;
if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return snprintf(buf, PAGE_SIZE, "\n");
}
return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 79 | 100.00% | 3 | 100.00% |
Total | 79 | 100.00% | 3 | 100.00% |
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
int ret = -EINVAL;
if (!hwmgr || !hwmgr->pm_en)
return ret;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return ret;
}
mutex_lock(&hwmgr->smu_lock);
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 107 | 100.00% | 4 | 100.00% |
Total | 107 | 100.00% | 4 | 100.00% |
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
pr_info("%s was not implemented.\n", __func__);
return -EINVAL;
}
return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 80 | 100.00% | 3 | 100.00% |
Total | 80 | 100.00% | 3 | 100.00% |
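/*
 * switch_power_profile keeps a bitmask of requested workload profiles,
 * ordered by hwmgr->workload_prority[]; fls() on the mask picks the
 * highest-priority profile still enabled, which is then applied unless the
 * DPM level is forced to MANUAL.
 */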
static int pp_dpm_switch_power_profile(void *handle,
                                       enum PP_SMC_POWER_PROFILE type, bool en)
{
        struct pp_hwmgr *hwmgr = handle;
        long workload;
        uint32_t index;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        if (!en) {
                hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
                index = fls(hwmgr->workload_mask);
                index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
                workload = hwmgr->workload_setting[index];
        } else {
                hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
                index = fls(hwmgr->workload_mask);
                index = index <= Workload_Policy_Max ? index - 1 : 0;
                workload = hwmgr->workload_setting[index];
        }

        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 180 | 79.65% | 3 | 75.00% |
Eric Huang | 46 | 20.35% | 1 | 25.00% |
Total | 226 | 100.00% | 4 | 100.00% |
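/*
 * A requested limit of 0 restores the default power limit; requests above
 * the board default are rejected with -EINVAL.
 */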
static int pp_set_power_limit(void *handle, uint32_t limit)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_power_limit == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        if (limit == 0)
                limit = hwmgr->default_power_limit;

        if (limit > hwmgr->default_power_limit)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
        hwmgr->power_limit = limit;
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 117 | 100.00% | 3 | 100.00% |
Total | 117 | 100.00% | 3 | 100.00% |
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en ||!limit)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
if (default_limit)
*limit = hwmgr->default_power_limit;
else
*limit = hwmgr->power_limit;
mutex_unlock(&hwmgr->smu_lock);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 79 | 100.00% | 3 | 100.00% |
Total | 79 | 100.00% | 3 | 100.00% |
static int pp_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *display_config)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
phm_store_dal_configuration_data(hwmgr, display_config);
mutex_unlock(&hwmgr->smu_lock);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 60 | 95.24% | 3 | 75.00% |
Alex Deucher | 3 | 4.76% | 1 | 25.00% |
Total | 63 | 100.00% | 4 | 100.00% |
static int pp_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!output)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_get_dal_power_level(hwmgr, output);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 64 | 88.89% | 5 | 83.33% |
Vitaly Prosyak | 8 | 11.11% | 1 | 16.67% |
Total | 72 | 100.00% | 6 | 100.00% |
static int pp_get_current_clocks(void *handle,
                                 struct amd_pp_clock_info *clocks)
{
        struct amd_pp_simple_clock_info simple_clocks;
        struct pp_clock_info hw_clocks;
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        phm_get_dal_power_level(hwmgr, &simple_clocks);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                            PHM_PlatformCaps_PowerContainment))
                ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
                                         &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
        else
                ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
                                         &hw_clocks, PHM_PerformanceLevelDesignation_Activity);

        if (ret) {
                pr_info("Error in phm_get_clock_info \n");
                mutex_unlock(&hwmgr->smu_lock);
                return -EINVAL;
        }

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        clocks->max_clocks_state = simple_clocks.level;

        if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 257 | 98.85% | 7 | 87.50% |
David Rokhvarg | 3 | 1.15% | 1 | 12.50% |
Total | 260 | 100.00% | 8 | 100.00% |
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (clocks == NULL)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type(hwmgr, type, clocks);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 85 | 100.00% | 7 | 100.00% |
Total | 85 | 100.00% | 7 | 100.00% |
static int pp_get_clock_by_type_with_latency(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 64 | 82.05% | 1 | 25.00% |
Rex Zhu | 14 | 17.95% | 3 | 75.00% |
Total | 78 | 100.00% | 4 | 100.00% |
static int pp_get_clock_by_type_with_voltage(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 64 | 82.05% | 1 | 25.00% |
Rex Zhu | 14 | 17.95% | 3 | 75.00% |
Total | 78 | 100.00% | 4 | 100.00% |
static int pp_set_watermarks_for_clocks_ranges(void *handle,
struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
wm_with_clock_ranges);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 58 | 80.56% | 1 | 25.00% |
Rex Zhu | 14 | 19.44% | 3 | 75.00% |
Total | 72 | 100.00% | 4 | 100.00% |
static int pp_display_clock_voltage_request(void *handle,
struct pp_display_clock_request *clock)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clock)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_display_clock_voltage_request(hwmgr, clock);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 58 | 80.56% | 1 | 25.00% |
Rex Zhu | 14 | 19.44% | 3 | 75.00% |
Total | 72 | 100.00% | 4 | 100.00% |
static int pp_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *clocks)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
ret = phm_get_max_high_clocks(hwmgr, clocks);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 82 | 96.47% | 7 | 87.50% |
David Rokhvarg | 3 | 3.53% | 1 | 12.50% |
Total | 85 | 100.00% | 8 | 100.00% |
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Huang | 54 | 85.71% | 1 | 50.00% |
Rex Zhu | 9 | 14.29% | 1 | 50.00% |
Total | 63 | 100.00% | 2 | 100.00% |
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
.force_performance_level = pp_dpm_force_performance_level,
.get_performance_level = pp_dpm_get_performance_level,
.get_current_power_state = pp_dpm_get_current_power_state,
.powergate_vce = pp_dpm_powergate_vce,
.powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
.get_pp_num_states = pp_dpm_get_pp_num_states,
.get_pp_table = pp_dpm_get_pp_table,
.set_pp_table = pp_dpm_set_pp_table,
.force_clock_level = pp_dpm_force_clock_level,
.print_clock_levels = pp_dpm_print_clock_levels,
.get_sclk_od = pp_dpm_get_sclk_od,
.set_sclk_od = pp_dpm_set_sclk_od,
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
.set_power_limit = pp_set_power_limit,
.get_power_limit = pp_get_power_limit,
/* export to DC */
.get_sclk = pp_dpm_get_sclk,
.get_mclk = pp_dpm_get_mclk,
.display_configuration_change = pp_display_configuration_change,
.get_display_power_level = pp_get_display_power_level,
.get_current_clocks = pp_get_current_clocks,
.get_clock_by_type = pp_get_clock_by_type,
.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = pp_display_clock_voltage_request,
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};
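A minimal sketch of how an amdgpu caller dispatches through this table; the helper name is illustrative, and in practice amdgpu_pm.c goes through its amdgpu_dpm_* wrappers:
/* Illustrative sketch: query the current GFX clock via the exported table. */
static uint32_t example_query_sclk(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

        if (!funcs || !funcs->get_sclk)
                return 0;

        return funcs->get_sclk(adev->powerplay.pp_handle, false);
}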
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rex Zhu | 3996 | 70.60% | 40 | 51.28% |
Eric Huang | 1034 | 18.27% | 12 | 15.38% |
Alex Deucher | 368 | 6.50% | 4 | 5.13% |
Huang Rui | 65 | 1.15% | 6 | 7.69% |
Tom St Denis | 52 | 0.92% | 3 | 3.85% |
Grazvydas Ignotas | 48 | 0.85% | 1 | 1.28% |
Jammy Zhou | 35 | 0.62% | 2 | 2.56% |
Andrey Grodzovsky | 18 | 0.32% | 1 | 1.28% |
Evan Quan | 17 | 0.30% | 2 | 2.56% |
Vitaly Prosyak | 8 | 0.14% | 1 | 1.28% |
Edward O'Callaghan | 6 | 0.11% | 2 | 2.56% |
David Rokhvarg | 6 | 0.11% | 1 | 1.28% |
Dan Carpenter | 4 | 0.07% | 1 | 1.28% |
Baoyou Xie | 2 | 0.04% | 1 | 1.28% |
Pixel Ding | 1 | 0.02% | 1 | 1.28% |
Total | 5660 | 100.00% | 78 | 100.00% |