drm/amd/pm: Remove unused interface to set plpd
Remove unused callback to set PLPD policy and its implementation from arcturus, aldebaran and SMUv13.0.6 SOCs.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Asad Kamal <asad.kamal@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

pull/914/head
parent
2aadb520bf
commit
9488d7affe
|
|
@ -876,12 +876,6 @@ struct pptable_funcs {
|
|||
*/
|
||||
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
|
||||
|
||||
/**
|
||||
* @select_xgmi_plpd_policy: Select xgmi per-link power down policy.
|
||||
*/
|
||||
int (*select_xgmi_plpd_policy)(struct smu_context *smu,
|
||||
enum pp_xgmi_plpd_mode mode);
|
||||
|
||||
/**
|
||||
* @update_pcie_parameters: Update and upload the system's PCIe
|
||||
* capabilities to the SMU.
|
||||
|
|
|
|||
|
|
@ -2222,27 +2222,6 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
|
|||
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
|
||||
}
|
||||
|
||||
static int arcturus_select_xgmi_plpd_policy(struct smu_context *smu,
|
||||
enum pp_xgmi_plpd_mode mode)
|
||||
{
|
||||
/* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */
|
||||
if (smu->smc_fw_version < 0x00361700) {
|
||||
dev_err(smu->adev->dev, "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (mode == XGMI_PLPD_DEFAULT)
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GmiPwrDnControl,
|
||||
1, NULL);
|
||||
else if (mode == XGMI_PLPD_DISALLOW)
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GmiPwrDnControl,
|
||||
0, NULL);
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static const struct throttling_logging_label {
|
||||
uint32_t feature_mask;
|
||||
const char *label;
|
||||
|
|
@ -2440,7 +2419,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
|
|||
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
|
||||
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
|
||||
.set_df_cstate = arcturus_set_df_cstate,
|
||||
.select_xgmi_plpd_policy = arcturus_select_xgmi_plpd_policy,
|
||||
.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
|
||||
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
|
||||
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
|
||||
|
|
|
|||
|
|
@ -1643,29 +1643,6 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
|
|||
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
|
||||
}
|
||||
|
||||
static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu,
|
||||
enum pp_xgmi_plpd_mode mode)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
/* The message only works on master die and NACK will be sent
|
||||
back for other dies, only send it on master die */
|
||||
if (adev->smuio.funcs->get_socket_id(adev) ||
|
||||
adev->smuio.funcs->get_die_id(adev))
|
||||
return 0;
|
||||
|
||||
if (mode == XGMI_PLPD_DEFAULT)
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GmiPwrDnControl,
|
||||
0, NULL);
|
||||
else if (mode == XGMI_PLPD_DISALLOW)
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GmiPwrDnControl,
|
||||
1, NULL);
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static const struct throttling_logging_label {
|
||||
uint32_t feature_mask;
|
||||
const char *label;
|
||||
|
|
@ -2105,7 +2082,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
|
|||
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
|
||||
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
|
||||
.set_df_cstate = aldebaran_set_df_cstate,
|
||||
.select_xgmi_plpd_policy = aldebaran_select_xgmi_plpd_policy,
|
||||
.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
|
||||
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
|
||||
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
|
||||
|
|
|
|||
|
|
@ -3248,44 +3248,6 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
|
|||
.parse_error_code = aca_smu_parse_error_code,
|
||||
};
|
||||
|
||||
static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
|
||||
enum pp_xgmi_plpd_mode mode)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
int ret, param;
|
||||
|
||||
switch (mode) {
|
||||
case XGMI_PLPD_DEFAULT:
|
||||
param = PPSMC_PLPD_MODE_DEFAULT;
|
||||
break;
|
||||
case XGMI_PLPD_OPTIMIZED:
|
||||
param = PPSMC_PLPD_MODE_OPTIMIZED;
|
||||
break;
|
||||
case XGMI_PLPD_DISALLOW:
|
||||
param = 0;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (mode == XGMI_PLPD_DISALLOW)
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GmiPwrDnControl,
|
||||
param, NULL);
|
||||
else
|
||||
/* change xgmi per-link power down policy */
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SelectPLPDMode,
|
||||
param, NULL);
|
||||
|
||||
if (ret)
|
||||
dev_err(adev->dev,
|
||||
"select xgmi per-link power down policy %d failed\n",
|
||||
mode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
|
||||
/* init dpm */
|
||||
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
|
||||
|
|
@ -3326,7 +3288,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
|
|||
.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
|
||||
.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
|
||||
.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
|
||||
.select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy,
|
||||
.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
|
||||
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
|
||||
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
|
||||
|
|
|
|||
Loading…
Reference in New Issue