drm/amdgpu: add missing cleanups for more ASICs on UVD/VCE suspend
Author:     Evan Quan <evan.quan@amd.com>
AuthorDate: Thu, 19 Aug 2021 04:07:59 +0000 (12:07 +0800)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Wed, 25 Aug 2021 22:15:02 +0000 (18:15 -0400)
This is a supplement to the commit below, extending the same UVD/VCE suspend-time cleanups to more ASICs:
"drm/amdgpu: add missing cleanups for Polaris12 UVD/VCE on suspend".

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
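
For reference, every hw_fini touched below follows the same shape. The sketch here is illustrative only and not part of the patch: "xxx" stands in for the per-ASIC prefix (uvd_v3_1, vce_v2_0, ...), and the VCE variants differ only in using adev->vce.idle_work, the VCE dpm/clock helpers and AMD_IP_BLOCK_TYPE_VCE.

/*
 * Illustrative sketch of the common cleanup shape added to each
 * hw_fini before the engine is halted (UVD naming shown).
 */
static int xxx_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Make sure the delayed idle handler cannot run concurrently. */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		/* DPM manages the block: simply drop the UVD power request. */
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		/* No DPM: gate clocks and power for the block explicitly. */
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	/* ... the per-ASIC stop/halt sequence continues as before ... */
	return 0;
}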

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 6c0e914..7232241 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -698,6 +698,30 @@ static int uvd_v3_1_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v3_1_stop(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index a301518..52d6de9 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -212,6 +212,30 @@ static int uvd_v4_2_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v4_2_stop(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a4d5bd2..db6d067 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -210,6 +210,30 @@ static int uvd_v5_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);
 
@@ -224,7 +248,6 @@ static int uvd_v5_0_suspend(void *handle)
        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;
-       uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
 
        return amdgpu_uvd_suspend(adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 939bcfa..b6e82d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -606,6 +606,30 @@ static int uvd_v7_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (!amdgpu_sriov_vf(adev))
                uvd_v7_0_stop(adev);
        else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c7d28c1..b70c17f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -477,6 +477,31 @@ static int vce_v2_0_hw_init(void *handle)
 
 static int vce_v2_0_hw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->vce.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_vce(adev, false);
+       } else {
+               amdgpu_asic_set_vce_clocks(adev, 0, 0);
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 90910d1..fec902b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -542,6 +542,29 @@ static int vce_v4_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->vce.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_vce(adev, false);
+       } else {
+               amdgpu_asic_set_vce_clocks(adev, 0, 0);
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (!amdgpu_sriov_vf(adev)) {
                /* vce_v4_0_wait_for_idle(handle); */
                vce_v4_0_stop(adev);