drm/amdgpu/sdma: set sched.ready status after ring/IB test in sdma
authorGuchun Chen <guchun.chen@amd.com>
Fri, 12 May 2023 08:04:56 +0000 (16:04 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 9 Jun 2023 14:57:07 +0000 (10:57 -0400)
sched.ready has nothing to do with ring initialization; it needs to be
set to true after the ring/IB test in amdgpu_ring_test_helper to indicate
that the ring is ready for submission.

Signed-off-by: Guchun Chen <guchun.chen@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c

index 67d1623..52598fb 100644 (file)
@@ -489,8 +489,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 #endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-               ring->sched.ready = true;
        }
 
        cik_sdma_enable(adev, true);
index fd2a7b6..51afc92 100644 (file)
@@ -466,8 +466,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 #endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-               ring->sched.ready = true;
        }
 
        sdma_v2_4_enable(adev, true);
index e572389..3442028 100644 (file)
@@ -734,8 +734,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 #endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-               ring->sched.ready = true;
        }
 
        /* unhalt the MEs */
index 70b0d1f..1f83eeb 100644 (file)
@@ -1114,8 +1114,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 /**
@@ -1202,8 +1200,6 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 static void
index 590b085..ff41fb5 100644 (file)
@@ -685,8 +685,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 /**
@@ -776,8 +774,6 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
index a0077cf..5c4d4df 100644 (file)
@@ -819,8 +819,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 
-               ring->sched.ready = true;
-
                if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
                        sdma_v5_0_ctx_switch_enable(adev, true);
                        sdma_v5_0_enable(adev, true);
index efa2c84..6aae62b 100644 (file)
@@ -617,8 +617,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 
-               ring->sched.ready = true;
-
                if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
                        sdma_v5_2_ctx_switch_enable(adev, true);
                        sdma_v5_2_enable(adev, true);
@@ -630,6 +628,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
                        return r;
                }
 
+               ring->sched.ready = true;
+
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
index 79d0979..1c90b5c 100644 (file)
@@ -585,16 +585,12 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
 
-               ring->sched.ready = true;
-
                if (amdgpu_sriov_vf(adev))
                        sdma_v6_0_enable(adev, true);
 
                r = amdgpu_ring_test_helper(ring);
-               if (r) {
-                       ring->sched.ready = false;
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
index abca8b5..42c4547 100644 (file)
@@ -174,8 +174,6 @@ static int si_dma_start(struct amdgpu_device *adev)
                WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-               ring->sched.ready = true;
-
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;