drivers/misc/habanalabs/command_submission.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

static void job_wq_completion(struct work_struct *work);
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
                struct hl_ctx *ctx, u64 timeout_us, u64 seq);
static void cs_do_release(struct kref *ref);

static const char *hl_fence_get_driver_name(struct dma_fence *fence)
{
        return "HabanaLabs";
}

static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
{
        struct hl_dma_fence *hl_fence =
                container_of(fence, struct hl_dma_fence, base_fence);

        return dev_name(hl_fence->hdev->dev);
}

static bool hl_fence_enable_signaling(struct dma_fence *fence)
{
        return true;
}

static void hl_fence_release(struct dma_fence *fence)
{
        struct hl_dma_fence *hl_fence =
                container_of(fence, struct hl_dma_fence, base_fence);

        kfree_rcu(hl_fence, base_fence.rcu);
}

static const struct dma_fence_ops hl_fence_ops = {
        .get_driver_name = hl_fence_get_driver_name,
        .get_timeline_name = hl_fence_get_timeline_name,
        .enable_signaling = hl_fence_enable_signaling,
        .wait = dma_fence_default_wait,
        .release = hl_fence_release
};

static void cs_get(struct hl_cs *cs)
{
        kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
        return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
        kref_put(&cs->refcount, cs_do_release);
}

/*
 * cs_parser - parse the user command submission
 *
 * @hpriv : pointer to the private data of the fd
 * @job   : pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
        struct hl_device *hdev = hpriv->hdev;
        struct hl_cs_parser parser;
        int rc;

        parser.ctx_id = job->cs->ctx->asid;
        parser.cs_sequence = job->cs->sequence;
        parser.job_id = job->id;

        parser.hw_queue_id = job->hw_queue_id;
        parser.job_userptr_list = &job->userptr_list;
        parser.patched_cb = NULL;
        parser.user_cb = job->user_cb;
        parser.user_cb_size = job->user_cb_size;
        parser.ext_queue = job->ext_queue;
        job->patched_cb = NULL;
        parser.use_virt_addr = hdev->mmu_enable;

        rc = hdev->asic_funcs->cs_parser(hdev, &parser);
        if (job->ext_queue) {
                if (!rc) {
                        job->patched_cb = parser.patched_cb;
                        job->job_cb_size = parser.patched_cb_size;

                        spin_lock(&job->patched_cb->lock);
                        job->patched_cb->cs_cnt++;
                        spin_unlock(&job->patched_cb->lock);
                }

                /*
                 * Whether the parsing worked or not, we don't need the
                 * original CB anymore because it was already parsed and
                 * won't be accessed again for this CS
                 */
                spin_lock(&job->user_cb->lock);
                job->user_cb->cs_cnt--;
                spin_unlock(&job->user_cb->lock);
                hl_cb_put(job->user_cb);
                job->user_cb = NULL;
        }

        return rc;
}

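/*
 * free_job - free a single job and the resources it holds
 *
 * @hdev : pointer to the habanalabs device structure
 * @job  : pointer to the job to free
 *
 * For a job on an external queue, release its userptr list and the patched
 * CB (if one was created). The job is removed from the CS job list, and for
 * external queues the CS reference that the job took is dropped as well.
 */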
static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
        struct hl_cs *cs = job->cs;

        if (job->ext_queue) {
                hl_userptr_delete_list(hdev, &job->userptr_list);

                /*
                 * We might arrive here from a rollback, in which case the
                 * patched CB wasn't created, so we need to check that it
                 * isn't NULL
                 */
                if (job->patched_cb) {
                        spin_lock(&job->patched_cb->lock);
                        job->patched_cb->cs_cnt--;
                        spin_unlock(&job->patched_cb->lock);

                        hl_cb_put(job->patched_cb);
                }
        }

        /*
         * This is the only place where there can be multiple threads
         * modifying the list at the same time
         */
        spin_lock(&cs->job_lock);
        list_del(&job->cs_node);
        spin_unlock(&cs->job_lock);

        hl_debugfs_remove_job(hdev, job);

        if (job->ext_queue)
                cs_put(cs);

        kfree(job);
}

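/*
 * cs_do_release - final release of a command submission
 *
 * @ref : pointer to the refcount of the CS that reached zero
 *
 * Frees the remaining (internal) jobs, updates the CI of the internal queues,
 * removes the CS from the H/W queues mirror list and re-arms the TDR for the
 * next CS if needed. Finally, it signals and puts the CS fence and frees the
 * CS object itself.
 */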
static void cs_do_release(struct kref *ref)
{
        struct hl_cs *cs = container_of(ref, struct hl_cs,
                                                refcount);
        struct hl_device *hdev = cs->ctx->hdev;
        struct hl_cs_job *job, *tmp;

        cs->completed = true;

        /*
         * Although reaching here means that all the external jobs have
         * finished (because each of them took a refcount on the CS), we
         * still need to go over the internal jobs and free them. Otherwise,
         * we will leak memory and, what's worse, the CS object (and
         * potentially the CTX object) could be released, while a JOB
         * still holds a pointer to them (but no reference).
         */
        list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                free_job(hdev, job);

        /* We also need to update CI for internal queues */
        if (cs->submitted) {
                hl_int_hw_queue_update_ci(cs);

                spin_lock(&hdev->hw_queues_mirror_lock);
                /* remove CS from hw_queues mirror list */
                list_del_init(&cs->mirror_node);
                spin_unlock(&hdev->hw_queues_mirror_lock);

                /*
                 * Don't cancel the TDR if this CS timed out, because we
                 * might be running from the TDR context itself
                 */
                if ((!cs->timedout) &&
                        (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
                        struct hl_cs *next;

                        if (cs->tdr_active)
                                cancel_delayed_work_sync(&cs->work_tdr);

                        spin_lock(&hdev->hw_queues_mirror_lock);

                        /* queue TDR for next CS */
                        next = list_first_entry_or_null(
                                        &hdev->hw_queues_mirror_list,
                                        struct hl_cs, mirror_node);

                        if ((next) && (!next->tdr_active)) {
                                next->tdr_active = true;
                                schedule_delayed_work(&next->work_tdr,
                                                        hdev->timeout_jiffies);
                        }

                        spin_unlock(&hdev->hw_queues_mirror_lock);
                }
        }

        /*
         * Must be called before hl_ctx_put because inside we use the ctx
         * to get the device
         */
        hl_debugfs_remove_cs(cs);

        hl_ctx_put(cs->ctx);

        if (cs->timedout)
                dma_fence_set_error(cs->fence, -ETIMEDOUT);
        else if (cs->aborted)
                dma_fence_set_error(cs->fence, -EIO);

        dma_fence_signal(cs->fence);
        dma_fence_put(cs->fence);

        kfree(cs);
}

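/*
 * cs_timedout - TDR handler for a command submission that got stuck
 *
 * @work : pointer to the delayed work of the CS
 *
 * Marks the CS as timed out so its TDR won't be cancelled and, if
 * reset_on_lockup is set, resets the device.
 */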
static void cs_timedout(struct work_struct *work)
{
        struct hl_device *hdev;
        int ctx_asid, rc;
        struct hl_cs *cs = container_of(work, struct hl_cs,
                                                 work_tdr.work);

        rc = cs_get_unless_zero(cs);
        if (!rc)
                return;

        if ((!cs->submitted) || (cs->completed)) {
                cs_put(cs);
                return;
        }

        /* Mark the CS as timed out so we won't try to cancel its TDR */
        cs->timedout = true;

        hdev = cs->ctx->hdev;
        ctx_asid = cs->ctx->asid;

        /* TODO: add information about last signaled seq and last emitted seq */
        dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);

        cs_put(cs);

        if (hdev->reset_on_lockup)
                hl_device_reset(hdev, false, false);
}

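/*
 * allocate_cs - allocate and initialize a new command submission object
 *
 * @hdev   : pointer to the habanalabs device structure
 * @ctx    : pointer to the context that submits the CS
 * @cs_new : [out] the newly allocated CS
 *
 * Allocates the CS and its fence, assigns the next sequence number of the
 * context and stores the fence in the context's pending array. Returns
 * -EAGAIN if the pending slot for that sequence is still occupied by an
 * unsignaled fence.
 */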
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
                        struct hl_cs **cs_new)
{
        struct hl_dma_fence *fence;
        struct dma_fence *other = NULL;
        struct hl_cs *cs;
        int rc;

        cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
        if (!cs)
                return -ENOMEM;

        cs->ctx = ctx;
        cs->submitted = false;
        cs->completed = false;
        INIT_LIST_HEAD(&cs->job_list);
        INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
        kref_init(&cs->refcount);
        spin_lock_init(&cs->job_lock);

        fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
        if (!fence) {
                rc = -ENOMEM;
                goto free_cs;
        }

        fence->hdev = hdev;
        spin_lock_init(&fence->lock);
        cs->fence = &fence->base_fence;

        spin_lock(&ctx->cs_lock);

        fence->cs_seq = ctx->cs_sequence;
        other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
        if ((other) && (!dma_fence_is_signaled(other))) {
                spin_unlock(&ctx->cs_lock);
                rc = -EAGAIN;
                goto free_fence;
        }

        dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
                        ctx->asid, ctx->cs_sequence);

        cs->sequence = fence->cs_seq;

        ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
                                                        &fence->base_fence;
        ctx->cs_sequence++;

        dma_fence_get(&fence->base_fence);

        dma_fence_put(other);

        spin_unlock(&ctx->cs_lock);

        *cs_new = cs;

        return 0;

free_fence:
        kfree(fence);
free_cs:
        kfree(cs);
        return rc;
}

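/*
 * cs_rollback - free all the jobs of a CS that won't be submitted
 *
 * @hdev : pointer to the habanalabs device structure
 * @cs   : pointer to the CS to roll back
 */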
static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
        struct hl_cs_job *job, *tmp;

        list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                free_job(hdev, job);
}

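/*
 * hl_cs_rollback_all - abort all command submissions that are still in-flight
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * Flushes the completion workqueue and then marks as aborted, rolls back and
 * puts every CS that is still on the H/W queues mirror list.
 */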
void hl_cs_rollback_all(struct hl_device *hdev)
{
        struct hl_cs *cs, *tmp;

        /* flush all completions */
        flush_workqueue(hdev->cq_wq);

        /* Make sure we don't have leftovers in the H/W queues mirror list */
        list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
                                mirror_node) {
                cs_get(cs);
                cs->aborted = true;
                dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
                                        cs->ctx->asid, cs->sequence);
                cs_rollback(hdev, cs);
                cs_put(cs);
        }
}

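/*
 * job_wq_completion - free a job from the completion workqueue context
 *
 * @work : pointer to the work item embedded in the job
 */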
static void job_wq_completion(struct work_struct *work)
{
        struct hl_cs_job *job = container_of(work, struct hl_cs_job,
                                                finish_work);
        struct hl_cs *cs = job->cs;
        struct hl_device *hdev = cs->ctx->hdev;

        /* job is no longer needed */
        free_job(hdev, job);
}

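/*
 * validate_queue_index - validate the queue index of a CS chunk and get its CB
 *
 * @hdev      : pointer to the habanalabs device structure
 * @cb_mgr    : pointer to the CB manager of the submitting process
 * @chunk     : pointer to the CS chunk that was copied from the user
 * @ext_queue : [out] true if the chunk targets an external queue
 *
 * For an external queue, retrieve the CB object, validate its size and
 * increment its cs_cnt. For an internal queue, the chunk's cb_handle is
 * returned as-is, cast to a CB pointer. Returns NULL on any failure.
 */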
static struct hl_cb *validate_queue_index(struct hl_device *hdev,
                                        struct hl_cb_mgr *cb_mgr,
                                        struct hl_cs_chunk *chunk,
                                        bool *ext_queue)
{
        struct asic_fixed_properties *asic = &hdev->asic_prop;
        struct hw_queue_properties *hw_queue_prop;
        u32 cb_handle;
        struct hl_cb *cb;

        /* Assume external queue */
        *ext_queue = true;

        hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

        if ((chunk->queue_index >= HL_MAX_QUEUES) ||
                        (hw_queue_prop->type == QUEUE_TYPE_NA)) {
                dev_err(hdev->dev, "Queue index %d is invalid\n",
                        chunk->queue_index);
                return NULL;
        }

        if (hw_queue_prop->kmd_only) {
                dev_err(hdev->dev, "Queue index %d is restricted for KMD\n",
                        chunk->queue_index);
                return NULL;
        } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
                *ext_queue = false;
                return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
        }

        /* Retrieve CB object */
        cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

        cb = hl_cb_get(hdev, cb_mgr, cb_handle);
        if (!cb) {
                dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
                return NULL;
        }

        if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
                dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
                goto release_cb;
        }

        spin_lock(&cb->lock);
        cb->cs_cnt++;
        spin_unlock(&cb->lock);

        return cb;

release_cb:
        hl_cb_put(cb);
        return NULL;
}

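/*
 * hl_cs_allocate_job - allocate a new, empty job object
 *
 * @hdev      : pointer to the habanalabs device structure
 * @ext_queue : true if the job will run on an external queue
 *
 * For external-queue jobs, also initialize the userptr list and the
 * completion work item.
 */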
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
{
        struct hl_cs_job *job;

        job = kzalloc(sizeof(*job), GFP_ATOMIC);
        if (!job)
                return NULL;

        job->ext_queue = ext_queue;

        if (job->ext_queue) {
                INIT_LIST_HEAD(&job->userptr_list);
                INIT_WORK(&job->finish_work, job_wq_completion);
        }

        return job;
}

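/*
 * _hl_cs_ioctl - build and submit a single command submission
 *
 * @hpriv      : pointer to the private data of the fd
 * @chunks     : user pointer to the array of CS chunks
 * @num_chunks : number of chunks in the array
 * @cs_seq     : [out] sequence number of the new CS
 *
 * Copies the chunk array from user-space, allocates a CS, creates and parses
 * one job per chunk and schedules the CS on the H/W queues. On any failure
 * the CS is rolled back and all of its resources are released.
 */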
static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
                        u32 num_chunks, u64 *cs_seq)
{
        struct hl_device *hdev = hpriv->hdev;
        struct hl_cs_chunk *cs_chunk_array;
        struct hl_cs_job *job;
        struct hl_cs *cs;
        struct hl_cb *cb;
        bool ext_queue_present = false;
        u32 size_to_copy;
        int rc, i, parse_cnt;

        *cs_seq = ULLONG_MAX;

        if (num_chunks > HL_MAX_JOBS_PER_CS) {
                dev_err(hdev->dev,
                        "Number of chunks can NOT be larger than %d\n",
                        HL_MAX_JOBS_PER_CS);
                rc = -EINVAL;
                goto out;
        }

        cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
                                        GFP_ATOMIC);
        if (!cs_chunk_array) {
                rc = -ENOMEM;
                goto out;
        }

        size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
        if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
                dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
                rc = -EFAULT;
                goto free_cs_chunk_array;
        }

        /* increment refcnt for context */
        hl_ctx_get(hdev, hpriv->ctx);

        rc = allocate_cs(hdev, hpriv->ctx, &cs);
        if (rc) {
                hl_ctx_put(hpriv->ctx);
                goto free_cs_chunk_array;
        }

        *cs_seq = cs->sequence;

        hl_debugfs_add_cs(cs);

        /* Validate ALL the CS chunks before submitting the CS */
        for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
                struct hl_cs_chunk *chunk = &cs_chunk_array[i];
                bool ext_queue;

                cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
                                        &ext_queue);
                if (ext_queue) {
                        ext_queue_present = true;
                        if (!cb) {
                                rc = -EINVAL;
                                goto free_cs_object;
                        }
                }

                job = hl_cs_allocate_job(hdev, ext_queue);
                if (!job) {
                        dev_err(hdev->dev, "Failed to allocate a new job\n");
                        rc = -ENOMEM;
                        if (ext_queue)
                                goto release_cb;
                        else
                                goto free_cs_object;
                }

                job->id = i + 1;
                job->cs = cs;
                job->user_cb = cb;
                job->user_cb_size = chunk->cb_size;
                if (job->ext_queue)
                        job->job_cb_size = cb->size;
                else
                        job->job_cb_size = chunk->cb_size;
                job->hw_queue_id = chunk->queue_index;

                cs->jobs_in_queue_cnt[job->hw_queue_id]++;

                list_add_tail(&job->cs_node, &cs->job_list);

                /*
                 * Increment the CS reference. When the refcount reaches 0,
                 * the CS is done and can be signaled to the user and all of
                 * its resources can be freed. Only increment for jobs on
                 * external queues, because only those jobs generate a
                 * completion.
                 */
                if (job->ext_queue)
                        cs_get(cs);

                hl_debugfs_add_job(hdev, job);

                rc = cs_parser(hpriv, job);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
                                cs->ctx->asid, cs->sequence, job->id, rc);
                        goto free_cs_object;
                }
        }

        if (!ext_queue_present) {
                dev_err(hdev->dev,
                        "Reject CS %d.%llu because it has no external queue jobs\n",
                        cs->ctx->asid, cs->sequence);
                rc = -EINVAL;
                goto free_cs_object;
        }

        rc = hl_hw_queue_schedule_cs(cs);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to submit CS %d.%llu to H/W queues, error %d\n",
                        cs->ctx->asid, cs->sequence, rc);
                goto free_cs_object;
        }

        rc = HL_CS_STATUS_SUCCESS;
        goto put_cs;

release_cb:
        spin_lock(&cb->lock);
        cb->cs_cnt--;
        spin_unlock(&cb->lock);
        hl_cb_put(cb);
free_cs_object:
        cs_rollback(hdev, cs);
        *cs_seq = ULLONG_MAX;
        /* The path below is both for good and erroneous exits */
put_cs:
        /* We finished with the CS in this function, so put the ref */
        cs_put(cs);
free_cs_chunk_array:
        kfree(cs_chunk_array);
out:
        return rc;
}

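/*
 * hl_cs_ioctl - handler of the command submission IOCTL
 *
 * @hpriv : pointer to the private data of the fd
 * @data  : pointer to the IOCTL arguments (union hl_cs_args)
 *
 * If a context switch or a forced restore is needed, submit the restore CS
 * first and wait for it to complete before submitting the execution CS.
 */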
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
        struct hl_device *hdev = hpriv->hdev;
        union hl_cs_args *args = data;
        struct hl_ctx *ctx = hpriv->ctx;
        void __user *chunks;
        u32 num_chunks;
        u64 cs_seq = ULLONG_MAX;
        int rc, do_restore;
        bool need_soft_reset = false;

        if (hl_device_disabled_or_in_reset(hdev)) {
                dev_warn(hdev->dev,
                        "Device is %s. Can't submit new CS\n",
                        atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
                rc = -EBUSY;
                goto out;
        }

        do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);

        if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
                long ret;

                chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
                num_chunks = args->in.num_chunks_restore;

                mutex_lock(&hpriv->restore_phase_mutex);

                if (do_restore) {
                        rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
                        if (rc) {
                                dev_err_ratelimited(hdev->dev,
                                        "Failed to switch to context %d, rejecting CS! %d\n",
                                        ctx->asid, rc);
                                /*
                                 * If we timed out, or if the device is not
                                 * IDLE while we want to do a context switch
                                 * (-EBUSY), we need to soft-reset because the
                                 * QMAN is probably stuck. However, we can't
                                 * call the reset here directly because of a
                                 * deadlock, so we need to do it at the very
                                 * end of this function
                                 */
                                if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
                                        need_soft_reset = true;
                                mutex_unlock(&hpriv->restore_phase_mutex);
                                goto out;
                        }
                }

                hdev->asic_funcs->restore_phase_topology(hdev);

                if (num_chunks == 0) {
                        dev_dbg(hdev->dev,
                        "Need to run restore phase but restore CS is empty\n");
                        rc = 0;
                } else {
                        rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,
                                                &cs_seq);
                }

                mutex_unlock(&hpriv->restore_phase_mutex);

                if (rc) {
                        dev_err(hdev->dev,
                                "Failed to submit restore CS for context %d (%d)\n",
                                ctx->asid, rc);
                        goto out;
                }

                /* Need to wait for restore completion before execution phase */
                if (num_chunks > 0) {
                        ret = _hl_cs_wait_ioctl(hdev, ctx,
                                        jiffies_to_usecs(hdev->timeout_jiffies),
                                        cs_seq);
                        if (ret <= 0) {
                                dev_err(hdev->dev,
                                        "Restore CS for context %d failed to complete %ld\n",
                                        ctx->asid, ret);
                                rc = -ENOEXEC;
                                goto out;
                        }
                }

                ctx->thread_restore_wait_token = 1;
        } else if (!ctx->thread_restore_wait_token) {
                u32 tmp;

                rc = hl_poll_timeout_memory(hdev,
                        (u64) (uintptr_t) &ctx->thread_restore_wait_token,
                        jiffies_to_usecs(hdev->timeout_jiffies),
                        &tmp);

                if (rc || !tmp) {
                        dev_err(hdev->dev,
                                "restore phase hasn't finished in time\n");
                        rc = -ETIMEDOUT;
                        goto out;
                }
        }

        chunks = (void __user *)(uintptr_t)args->in.chunks_execute;
        num_chunks = args->in.num_chunks_execute;

        if (num_chunks == 0) {
                dev_err(hdev->dev,
                        "Got execute CS with 0 chunks, context %d\n",
                        ctx->asid);
                rc = -EINVAL;
                goto out;
        }

        rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);

out:
        if (rc != -EAGAIN) {
                memset(args, 0, sizeof(*args));
                args->out.status = rc;
                args->out.seq = cs_seq;
        }

        if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
                hl_device_reset(hdev, false, false);

        return rc;
}

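/*
 * _hl_cs_wait_ioctl - wait for a command submission to complete
 *
 * @hdev       : pointer to the habanalabs device structure
 * @ctx        : pointer to the context that submitted the CS
 * @timeout_us : timeout in microseconds
 * @seq        : sequence number of the CS to wait on
 *
 * Returns a positive value if the CS completed, zero if the wait timed out,
 * or a negative error code. If the fence for the given sequence no longer
 * exists, 1 is returned.
 */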
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
                struct hl_ctx *ctx, u64 timeout_us, u64 seq)
{
        struct dma_fence *fence;
        unsigned long timeout;
        long rc;

        if (timeout_us == MAX_SCHEDULE_TIMEOUT)
                timeout = timeout_us;
        else
                timeout = usecs_to_jiffies(timeout_us);

        hl_ctx_get(hdev, ctx);

        fence = hl_ctx_get_fence(ctx, seq);
        if (IS_ERR(fence)) {
                rc = PTR_ERR(fence);
        } else if (fence) {
                rc = dma_fence_wait_timeout(fence, true, timeout);
                if (fence->error == -ETIMEDOUT)
                        rc = -ETIMEDOUT;
                else if (fence->error == -EIO)
                        rc = -EIO;
                dma_fence_put(fence);
        } else
                rc = 1;

        hl_ctx_put(ctx);

        return rc;
}

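/*
 * hl_cs_wait_ioctl - handler of the wait-for-CS IOCTL
 *
 * @hpriv : pointer to the private data of the fd
 * @data  : pointer to the IOCTL arguments (union hl_wait_cs_args)
 *
 * Translates the return value of _hl_cs_wait_ioctl into a user-visible
 * wait status.
 */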
int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
        struct hl_device *hdev = hpriv->hdev;
        union hl_wait_cs_args *args = data;
        u64 seq = args->in.seq;
        long rc;

        rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);

        memset(args, 0, sizeof(*args));

        if (rc < 0) {
                dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
                        rc, seq);
                if (rc == -ERESTARTSYS) {
                        args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
                        rc = -EINTR;
                } else if (rc == -ETIMEDOUT) {
                        args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
                } else if (rc == -EIO) {
                        args->out.status = HL_WAIT_CS_STATUS_ABORTED;
                }
                return rc;
        }

        if (rc == 0)
                args->out.status = HL_WAIT_CS_STATUS_BUSY;
        else
                args->out.status = HL_WAIT_CS_STATUS_COMPLETED;

        return 0;
}