drm/sched: Rename drm_sched_get_cleanup_job to be more descriptive
author	Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Thu, 2 Nov 2023 10:55:34 +0000 (10:55 +0000)
committer	Luben Tuikov <ltuikov89@gmail.com>
Sun, 5 Nov 2023 01:17:42 +0000 (21:17 -0400)
"Get cleanup job" makes it sound like helper is returning a job which will
execute some cleanup, or something, while the kerneldoc itself accurately
says "fetch the next _finished_ job". So lets rename the helper to be self
documenting.
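
For illustration, a minimal sketch of the calling pattern the new name
describes (mirroring the drm_sched_free_job_work() hunk below; not new
code): the helper only fetches the next finished job, and the caller
performs the actual cleanup.

	struct drm_sched_job *job;

	job = drm_sched_get_finished_job(sched);
	if (job) {
		/* The helper only fetched the job; freeing happens here. */
		sched->ops->free_job(job);
	}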

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Luben Tuikov <ltuikov89@gmail.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231102105538.391648-2-tvrtko.ursulin@linux.intel.com
Reviewed-by: Luben Tuikov <ltuikov89@gmail.com>
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
drivers/gpu/drm/scheduler/sched_main.c

index 98b2ad5..fb64b35 100644
@@ -448,7 +448,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 
        sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 
-       /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+       /* Protects against concurrent deletion in drm_sched_get_finished_job */
        spin_lock(&sched->job_list_lock);
        job = list_first_entry_or_null(&sched->pending_list,
                                       struct drm_sched_job, list);
@@ -500,9 +500,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 
        /*
         * Reinsert back the bad job here - now it's safe as
-        * drm_sched_get_cleanup_job cannot race against us and release the
+        * drm_sched_get_finished_job cannot race against us and release the
         * bad job at this point - we parked (waited for) any in progress
-        * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+        * (earlier) cleanups and drm_sched_get_finished_job will not be called
         * now until the scheduler thread is unparked.
         */
        if (bad && bad->sched == sched)
@@ -960,7 +960,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
+ * drm_sched_get_finished_job - fetch the next finished job to be destroyed
  *
  * @sched: scheduler instance
  *
@@ -968,7 +968,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
  * ready for it to be destroyed.
  */
 static struct drm_sched_job *
-drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
 {
        struct drm_sched_job *job, *next;
 
@@ -1059,14 +1059,14 @@ static void drm_sched_free_job_work(struct work_struct *w)
 {
        struct drm_gpu_scheduler *sched =
                container_of(w, struct drm_gpu_scheduler, work_free_job);
-       struct drm_sched_job *cleanup_job;
+       struct drm_sched_job *job;
 
        if (READ_ONCE(sched->pause_submit))
                return;
 
-       cleanup_job = drm_sched_get_cleanup_job(sched);
-       if (cleanup_job) {
-               sched->ops->free_job(cleanup_job);
+       job = drm_sched_get_finished_job(sched);
+       if (job) {
+               sched->ops->free_job(job);
 
                drm_sched_free_job_queue_if_done(sched);
                drm_sched_run_job_queue_if_ready(sched);