drm/amd/powerplay: update swSMU VCN/JPEG PG logics
author		Evan Quan <evan.quan@amd.com>		Mon, 3 Aug 2020 03:15:14 +0000 (11:15 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>	Fri, 7 Aug 2020 21:29:45 +0000 (17:29 -0400)
Add lock protection for the VCN/JPEG gate state and skip the
power up/down request when the PG state already matches the one requested.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Tested-by: Matt Coffin <mcoffin13@gmail.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
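[Editor's note] The new smu_dpm_set_vcn_enable()/smu_dpm_set_jpeg_enable() wrappers in
amdgpu_smu.c serialize the cached gate state behind a per-IP mutex and return early when
that state already matches the request: the "gated ^ enable" test is true exactly when no
transition is needed. The stand-alone sketch below is an illustration of that pattern, not
the kernel code itself; user-space C11 atomics and a pthread mutex stand in for the
kernel's atomic_t and struct mutex, and the SMU power up/down message is stubbed out.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int vcn_gated = 1;                /* VCN starts power gated, as in smu_sw_init() */
static pthread_mutex_t vcn_gate_lock = PTHREAD_MUTEX_INITIALIZER;

static int dpm_set_vcn_enable(bool enable)
{
        int ret = 0;

        pthread_mutex_lock(&vcn_gate_lock);

        /*
         * gated ^ enable is true exactly when VCN is already in the
         * requested state (gated and asked to gate, or running and
         * asked to power up), so there is nothing to do.
         */
        if (atomic_load(&vcn_gated) ^ enable)
                goto out;

        /* The kernel sends the PowerUpVcn/PowerDownVcn SMU message here. */
        printf("VCN power %s\n", enable ? "up" : "down");
        atomic_store(&vcn_gated, !enable);

out:
        pthread_mutex_unlock(&vcn_gate_lock);
        return ret;
}

int main(void)
{
        dpm_set_vcn_enable(true);       /* gated -> running        */
        dpm_set_vcn_enable(true);       /* no-op: already running  */
        dpm_set_vcn_enable(false);      /* running -> gated        */
        dpm_set_vcn_enable(false);      /* no-op: already gated    */
        return 0;
}

The mutex is needed on top of the atomic flag because the state check and the SMU message
must form one critical section; with the atomic alone, two concurrent callers could both
observe the stale state and each issue a power up/down request.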
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/powerplay/smu_internal.h

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f3f50b5..1b64ca9 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -133,6 +133,56 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
        return ret;
 }
 
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+                                 bool enable)
+{
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
+       int ret = 0;
+
+       if (!smu->ppt_funcs->dpm_set_vcn_enable)
+               return 0;
+
+       mutex_lock(&power_gate->vcn_gate_lock);
+
+       if (atomic_read(&power_gate->vcn_gated) ^ enable)
+               goto out;
+
+       ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
+       if (!ret)
+               atomic_set(&power_gate->vcn_gated, !enable);
+
+out:
+       mutex_unlock(&power_gate->vcn_gate_lock);
+
+       return ret;
+}
+
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+                                  bool enable)
+{
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
+       int ret = 0;
+
+       if (!smu->ppt_funcs->dpm_set_jpeg_enable)
+               return 0;
+
+       mutex_lock(&power_gate->jpeg_gate_lock);
+
+       if (atomic_read(&power_gate->jpeg_gated) ^ enable)
+               goto out;
+
+       ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
+       if (!ret)
+               atomic_set(&power_gate->jpeg_gated, !enable);
+
+out:
+       mutex_unlock(&power_gate->jpeg_gate_lock);
+
+       return ret;
+}
+
 /**
  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
  *
@@ -650,6 +700,11 @@ static int smu_sw_init(void *handle)
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 
+       atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+       atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
+       mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
+       mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
+
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
@@ -1974,7 +2029,7 @@ int smu_read_sensor(struct smu_context *smu,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
-               *(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1;
+               *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index e3a2d7f..e59e6fb 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1896,8 +1896,6 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
 
 static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -1908,7 +1906,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                                return ret;
                        }
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
@@ -1917,7 +1914,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                                return ret;
                        }
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ec2d2aa..23c2279 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -295,8 +295,10 @@ struct smu_dpm_context {
 struct smu_power_gate {
        bool uvd_gated;
        bool vce_gated;
-       bool vcn_gated;
-       bool jpeg_gated;
+       atomic_t vcn_gated;
+       atomic_t jpeg_gated;
+       struct mutex vcn_gate_lock;
+       struct mutex jpeg_gate_lock;
 };
 
 struct smu_power_context {
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 8c45c47..82659b7 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -831,8 +831,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
 static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -842,14 +840,12 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -857,8 +853,6 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -867,14 +861,12 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = true;
        }
 
        return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 4c1a506..8a8e603 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -479,8 +479,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
 
 static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -490,14 +488,12 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -505,8 +501,6 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
 static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -515,14 +509,12 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = true;
        }
 
        return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index 2566ae5..bb756b4 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -815,10 +815,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
 static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        struct amdgpu_device *adev = smu->adev;
-
        int ret = 0;
 
        if (enable) {
@@ -834,7 +831,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
                                        return ret;
                        }
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
@@ -847,7 +843,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
                                        return ret;
                        }
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -855,8 +850,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
 
 static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -865,14 +858,12 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = true;
        }
 
        return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index d0deaef..f1d8f24 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -77,8 +77,6 @@
 #define smu_get_dal_power_level(smu, clocks)                           smu_ppt_funcs(get_dal_power_level, 0, smu, clocks)
 #define smu_get_perf_level(smu, designation, level)                    smu_ppt_funcs(get_perf_level, 0, smu, designation, level)
 #define smu_get_current_shallow_sleep_clocks(smu, clocks)              smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
-#define smu_dpm_set_vcn_enable(smu, enable)                            smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
-#define smu_dpm_set_jpeg_enable(smu, enable)                           smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
 #define smu_set_watermarks_table(smu, clock_ranges)                    smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
 #define smu_thermal_temperature_range_update(smu, range, rw)           smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
 #define smu_register_irq_handler(smu)                                  smu_ppt_funcs(register_irq_handler, 0, smu)