drm/amdgpu: partially revert "reduce reset time"
Author: Christian König <christian.koenig@amd.com>
Date:   Thu, 12 Dec 2024 15:51:04 +0000 (16:51 +0100)
Committer: Alex Deucher <alexander.deucher@amd.com>
Date:      Wed, 18 Dec 2024 17:39:07 +0000 (12:39 -0500)
This partially reverts commit 194eb174cbe4fe2b3376ac30acca2dc8c8beca00.

This commit introduced a new state variable into adev without even
remotely worrying about CPU barriers.

Since we already have the amdgpu_in_reset() function for exactly this
use case, partially revert that commit.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c

index 9f5351c..69895fc 100644 (file)
@@ -1175,7 +1175,6 @@ struct amdgpu_device {
 
        struct work_struct              reset_work;
 
-       bool                            job_hang;
        bool                            dc_enabled;
        /* Mask of active clusters */
        uint32_t                        aid_mask;
index de30143..2e5732d 100644 (file)
@@ -836,7 +836,7 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
 
-       if (!kiq_ring->sched.ready || adev->job_hang)
+       if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
                return 0;
 
        ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
index 4a4e40d..6d5d81f 100644 (file)
@@ -515,7 +515,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
 
-       if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev))
+       if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
                return 0;
 
        spin_lock(&kiq->ring_lock);
@@ -567,7 +567,7 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
 
-       if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang)
+       if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
                return 0;
 
        if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
index 0469175..100f044 100644 (file)
@@ -102,8 +102,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
                return DRM_GPU_SCHED_STAT_ENODEV;
        }
 
-       adev->job_hang = true;
-
        /*
         * Do the coredump immediately after a job timeout to get a very
         * close dump/snapshot/representation of GPU's current error status
@@ -181,7 +179,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
        }
 
 exit:
-       adev->job_hang = false;
        drm_dev_exit(idx);
        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
index 9484f3b..003522c 100644 (file)
@@ -5957,7 +5957,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        else
                WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
 
-       if (adev->job_hang && !enable)
+       if (amdgpu_in_reset(adev) && !enable)
                return 0;
 
        for (i = 0; i < adev->usec_timeout; i++) {