struct drm_wedge_task_info *info = NULL;
struct amdgpu_task_info *ti = NULL;
struct amdgpu_device *adev = ring->adev;
+ enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
int idx, r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
...
} else if (amdgpu_gpu_recovery && ring->funcs->reset) {
dev_err(adev->dev, "Starting %s ring reset\n",
s_job->sched->name);
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
if (!r) {
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
atomic_inc(&ring->adev->gpu_reset_counter);
dev_err(adev->dev, "Ring %s reset succeeded\n",
ring->sched.name);
drm_dev_wedged_event(adev_to_drm(adev),
DRM_WEDGE_RECOVERY_NONE, info);
+ /* This is needed to add the job back to the pending list */
+ status = DRM_GPU_SCHED_STAT_NO_HANG;
goto exit;
}
dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
exit:
amdgpu_vm_put_task_info(ti);
drm_dev_exit(idx);
- return DRM_GPU_SCHED_STAT_RESET;
+ return status;
}
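For reference, the per-ring reset path now reads as sketched below. This is a condensed sketch of the flow visible in the hunk above; the surrounding timeout handler and its elided failure/wedge path are assumed from context rather than shown here. The hunks that follow then drop the corresponding stop/start calls from the ring reset helpers.

	/* Sketch of the per-ring reset flow after this change (assumed context). */
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
	int r;

	/* Stop the scheduler so nothing else touches the ring buffer. */
	drm_sched_wqueue_stop(&ring->sched);

	r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
	if (!r) {
		/* Reset succeeded: let the scheduler run again and count the reset. */
		drm_sched_wqueue_start(&ring->sched);
		atomic_inc(&ring->adev->gpu_reset_counter);
		/* Userspace is told no further recovery action is needed. */
		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info);
		/* NO_HANG makes the scheduler put the job back on the pending list. */
		status = DRM_GPU_SCHED_STAT_NO_HANG;
	}
	/* On failure, status stays DRM_GPU_SCHED_STAT_RESET and the elided
	 * fallback recovery path takes over. */
	return status;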
void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
struct amdgpu_fence *guilty_fence)
{
- /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
- drm_sched_wqueue_stop(&ring->sched);
/* back up the non-guilty commands */
amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
}
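With the stop call moved into the timeout handler, the begin helper reduces to saving the unprocessed commands. A sketch of the resulting helper, built only from the lines visible above (the fragment that follows appears to be from the matching end helper):

void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
				    struct amdgpu_fence *guilty_fence)
{
	/* The caller (the timeout handler) has already stopped the scheduler,
	 * so only the non-guilty commands need to be backed up for replay. */
	amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
}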
...
amdgpu_ring_write(ring, ring->ring_backup[i]);
amdgpu_ring_commit(ring);
}
- /* Start the scheduler again */
- drm_sched_wqueue_start(&ring->sched);
return 0;
}
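Likewise, the tail of the end helper now only replays the backed-up commands and reports success; restarting the scheduler is left to the caller once the reset is known to be good. A sketch of that tail follows; the loop bound is a hypothetical name, as the field holding the number of backed-up dwords is not visible in this excerpt:

	/* Replay the commands saved by amdgpu_ring_reset_helper_begin();
	 * backed_up_dw_count stands in for the real counter field. */
	for (i = 0; i < backed_up_dw_count; i++)
		amdgpu_ring_write(ring, ring->ring_backup[i]);
	amdgpu_ring_commit(ring);

	/* No drm_sched_wqueue_start() here anymore; the timeout handler
	 * restarts the scheduler after a successful reset. */
	return 0;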