1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2019 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13
14 #define HL_CS_FLAGS_TYPE_MASK   (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15                                 HL_CS_FLAGS_COLLECTIVE_WAIT)
16
17 /**
18  * enum hl_cs_wait_status - cs wait status
19  * @CS_WAIT_STATUS_BUSY: cs was not completed yet
20  * @CS_WAIT_STATUS_COMPLETED: cs completed
21  * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
22  */
23 enum hl_cs_wait_status {
24         CS_WAIT_STATUS_BUSY,
25         CS_WAIT_STATUS_COMPLETED,
26         CS_WAIT_STATUS_GONE
27 };
28
29 static void job_wq_completion(struct work_struct *work);
30 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
31                                 u64 timeout_us, u64 seq,
32                                 enum hl_cs_wait_status *status, s64 *timestamp);
33 static void cs_do_release(struct kref *ref);
34
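/*
 * hl_sob_reset - kref release callback of a h/w sync object (SOB)
 *
 * Called when the last reference to the SOB is dropped. It asks the ASIC
 * specific code to reset the SOB so it can be reused.
 */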
35 static void hl_sob_reset(struct kref *ref)
36 {
37         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
38                                                         kref);
39         struct hl_device *hdev = hw_sob->hdev;
40
41         hdev->asic_funcs->reset_sob(hdev, hw_sob);
42 }
43
44 void hl_sob_reset_error(struct kref *ref)
45 {
46         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
47                                                         kref);
48         struct hl_device *hdev = hw_sob->hdev;
49
50         dev_crit(hdev->dev,
51                         "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
52                         hw_sob->q_idx, hw_sob->sob_id);
53 }
54
55 /**
56  * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
57  * @sob_base: sob base id
58  * @sob_mask: sob user mask, each bit represents a sob offset from sob base
59  * @mask: generated mask
60  *
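 * For example, sob_mask == 0x1 with (sob_base & 0x7) == 5 yields
 * *mask == ~BIT(5) == 0xdf, while sob_mask == 0x7 (with a valid sob
 * range) yields *mask == ~0x7 == 0xf8.
 *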
61  * Return: 0 if given parameters are valid
62  */
63 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
64 {
65         int i;
66
67         if (sob_mask == 0)
68                 return -EINVAL;
69
70         if (sob_mask == 0x1) {
71                 *mask = ~(1 << (sob_base & 0x7));
72         } else {
73                 /* find msb in order to verify sob range is valid */
74                 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
75                         if (BIT(i) & sob_mask)
76                                 break;
77
78                 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
79                         return -EINVAL;
80
81                 *mask = ~sob_mask;
82         }
83
84         return 0;
85 }
86
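/*
 * hl_fence_release - kref release callback of a CS completion fence
 *
 * For signal/wait/collective-wait CS types it also drops the reference that
 * the CS took on its h/w SOB, then frees the completion object itself.
 */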
87 static void hl_fence_release(struct kref *kref)
88 {
89         struct hl_fence *fence =
90                 container_of(kref, struct hl_fence, refcount);
91         struct hl_cs_compl *hl_cs_cmpl =
92                 container_of(fence, struct hl_cs_compl, base_fence);
93         struct hl_device *hdev = hl_cs_cmpl->hdev;
94
95         /* EBUSY means the CS was never submitted and hence we don't have
96          * an attached hw_sob object that we should handle here
97          */
98         if (fence->error == -EBUSY)
99                 goto free;
100
101         if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
102                 (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
103                 (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {
104
105                 dev_dbg(hdev->dev,
106                         "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
107                         hl_cs_cmpl->cs_seq,
108                         hl_cs_cmpl->type,
109                         hl_cs_cmpl->hw_sob->sob_id,
110                         hl_cs_cmpl->sob_val);
111
112                 /*
113                  * A signal CS can get completion while the corresponding wait
114                  * for signal CS is on its way to the PQ. The wait for signal CS
115                  * will get stuck if the signal CS incremented the SOB to its
116                  * max value and there are no pending (submitted) waits on this
117                  * SOB.
118                  * We do the following to avoid this situation:
119                  * 1. The wait for signal CS must get a ref for the signal CS as
120                  *    soon as possible in cs_ioctl_signal_wait() and put it
121                  *    before being submitted to the PQ but after it incremented
122                  *    the SOB refcnt in init_signal_wait_cs().
123                  * 2. Signal/Wait for signal CS will decrement the SOB refcnt
124                  *    here.
125                  * These two measures guarantee that the wait for signal CS will
126                  * reset the SOB upon completion rather than the signal CS and
127                  * hence the above scenario is avoided.
128                  */
129                 kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
130
131                 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
132                         hdev->asic_funcs->reset_sob_group(hdev,
133                                         hl_cs_cmpl->sob_group);
134         }
135
136 free:
137         kfree(hl_cs_cmpl);
138 }
139
140 void hl_fence_put(struct hl_fence *fence)
141 {
142         if (fence)
143                 kref_put(&fence->refcount, hl_fence_release);
144 }
145
146 void hl_fence_get(struct hl_fence *fence)
147 {
148         if (fence)
149                 kref_get(&fence->refcount);
150 }
151
152 static void hl_fence_init(struct hl_fence *fence)
153 {
154         kref_init(&fence->refcount);
155         fence->error = 0;
156         fence->timestamp = ktime_set(0, 0);
157         init_completion(&fence->completion);
158 }
159
160 void cs_get(struct hl_cs *cs)
161 {
162         kref_get(&cs->refcount);
163 }
164
165 static int cs_get_unless_zero(struct hl_cs *cs)
166 {
167         return kref_get_unless_zero(&cs->refcount);
168 }
169
170 static void cs_put(struct hl_cs *cs)
171 {
172         kref_put(&cs->refcount, cs_do_release);
173 }
174
175 static void cs_job_do_release(struct kref *ref)
176 {
177         struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
178
179         kfree(job);
180 }
181
182 static void cs_job_put(struct hl_cs_job *job)
183 {
184         kref_put(&job->refcount, cs_job_do_release);
185 }
186
187 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
188 {
189         /*
190          * A patched CB is created for external queue jobs, and for H/W queue
191          * jobs if the user CB was allocated by the driver and the MMU is disabled.
192          */
193         return (job->queue_type == QUEUE_TYPE_EXT ||
194                         (job->queue_type == QUEUE_TYPE_HW &&
195                                         job->is_kernel_allocated_cb &&
196                                         !hdev->mmu_enable));
197 }
198
199 /*
200  * cs_parser - parse the user command submission
201  *
202  * @hpriv: pointer to the private data of the fd
203  * @job: pointer to the job that holds the command submission info
204  *
205  * The function parses the command submission of the user. It calls the
206  * ASIC specific parser, which returns a list of memory blocks to send
207  * to the device as different command buffers
208  *
209  */
210 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
211 {
212         struct hl_device *hdev = hpriv->hdev;
213         struct hl_cs_parser parser;
214         int rc;
215
216         parser.ctx_id = job->cs->ctx->asid;
217         parser.cs_sequence = job->cs->sequence;
218         parser.job_id = job->id;
219
220         parser.hw_queue_id = job->hw_queue_id;
221         parser.job_userptr_list = &job->userptr_list;
222         parser.patched_cb = NULL;
223         parser.user_cb = job->user_cb;
224         parser.user_cb_size = job->user_cb_size;
225         parser.queue_type = job->queue_type;
226         parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
227         job->patched_cb = NULL;
228
229         rc = hdev->asic_funcs->cs_parser(hdev, &parser);
230
231         if (is_cb_patched(hdev, job)) {
232                 if (!rc) {
233                         job->patched_cb = parser.patched_cb;
234                         job->job_cb_size = parser.patched_cb_size;
235                         job->contains_dma_pkt = parser.contains_dma_pkt;
236                         atomic_inc(&job->patched_cb->cs_cnt);
237                 }
238
239                 /*
240                  * Whether the parsing worked or not, we don't need the
241                  * original CB anymore because it was already parsed and
242                  * won't be accessed again for this CS
243                  */
244                 atomic_dec(&job->user_cb->cs_cnt);
245                 hl_cb_put(job->user_cb);
246                 job->user_cb = NULL;
247         } else if (!rc) {
248                 job->job_cb_size = job->user_cb_size;
249         }
250
251         return rc;
252 }
253
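/*
 * complete_job - release all resources that are held by a single job
 *
 * Releases the patched/user CBs and the userptr list, removes the job from
 * the CS job list and from debugfs, drops the CS reference that was taken
 * for jobs on external/H/W queues and finally puts the job itself.
 */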
254 static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
255 {
256         struct hl_cs *cs = job->cs;
257
258         if (is_cb_patched(hdev, job)) {
259                 hl_userptr_delete_list(hdev, &job->userptr_list);
260
261                 /*
262                  * We might arrive here from rollback and patched CB wasn't
263                  * created, so we need to check it's not NULL
264                  */
265                 if (job->patched_cb) {
266                         atomic_dec(&job->patched_cb->cs_cnt);
267                         hl_cb_put(job->patched_cb);
268                 }
269         }
270
271         /* For H/W queue jobs, if a user CB was allocated by the driver and the MMU
272          * is enabled, the user CB isn't released in cs_parser() and thus should be
273          * released here.
274          * This is also true for INT queue jobs which were allocated by the driver
275          */
276         if (job->is_kernel_allocated_cb &&
277                 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
278                                 job->queue_type == QUEUE_TYPE_INT)) {
279                 atomic_dec(&job->user_cb->cs_cnt);
280                 hl_cb_put(job->user_cb);
281         }
282
283         /*
284          * This is the only place where there can be multiple threads
285          * modifying the list at the same time
286          */
287         spin_lock(&cs->job_lock);
288         list_del(&job->cs_node);
289         spin_unlock(&cs->job_lock);
290
291         hl_debugfs_remove_job(hdev, job);
292
293         if (job->queue_type == QUEUE_TYPE_EXT ||
294                         job->queue_type == QUEUE_TYPE_HW)
295                 cs_put(cs);
296
297         cs_job_put(job);
298 }
299
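/*
 * cs_do_release - final release of a command submission
 *
 * Called when the CS refcount drops to zero. Completes any remaining
 * (internal queue) jobs, updates the CI of internal queues, removes the CS
 * from the mirror list, re-arms the TDR for the next CS, signals the fence
 * with the proper error code and frees the CS object.
 */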
300 static void cs_do_release(struct kref *ref)
301 {
302         struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
303         struct hl_device *hdev = cs->ctx->hdev;
304         struct hl_cs_job *job, *tmp;
305
306         cs->completed = true;
307
308         /*
309          * Although reaching here means that all external jobs have finished
310          * (because each one of them took a refcount on the CS), we still
311          * need to go over the internal jobs and complete them. Otherwise, we
312          * will have leaked memory and what's worse, the CS object (and
313          * potentially the CTX object) could be released, while the JOB
314          * still holds a pointer to them (but no reference).
315          */
316         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
317                 complete_job(hdev, job);
318
319         if (!cs->submitted) {
320                 /* In case the wait for signal CS was submitted, the put occurs
321                  * in init_signal_wait_cs() or collective_wait_init_cs()
322                  * right before hanging on the PQ.
323                  */
324                 if (cs->type == CS_TYPE_WAIT ||
325                                 cs->type == CS_TYPE_COLLECTIVE_WAIT)
326                         hl_fence_put(cs->signal_fence);
327
328                 goto out;
329         }
330
331         hdev->asic_funcs->hw_queues_lock(hdev);
332
333         hdev->cs_active_cnt--;
334         if (!hdev->cs_active_cnt) {
335                 struct hl_device_idle_busy_ts *ts;
336
337                 ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
338                 ts->busy_to_idle_ts = ktime_get();
339
340                 if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
341                         hdev->idle_busy_ts_idx = 0;
342         } else if (hdev->cs_active_cnt < 0) {
343                 dev_crit(hdev->dev, "CS active cnt %d is negative\n",
344                         hdev->cs_active_cnt);
345         }
346
347         hdev->asic_funcs->hw_queues_unlock(hdev);
348
349         /* Need to update CI for internal queues */
350         hl_int_hw_queue_update_ci(cs);
351
352         /* remove CS from CS mirror list */
353         spin_lock(&hdev->cs_mirror_lock);
354         list_del_init(&cs->mirror_node);
355         spin_unlock(&hdev->cs_mirror_lock);
356
357         /* Don't cancel the TDR in case this CS timed out because we might be
358          * running from the TDR context
359          */
360         if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
361                 struct hl_cs *next;
362
363                 if (cs->tdr_active)
364                         cancel_delayed_work_sync(&cs->work_tdr);
365
366                 spin_lock(&hdev->cs_mirror_lock);
367
368                 /* queue TDR for next CS */
369                 next = list_first_entry_or_null(&hdev->cs_mirror_list,
370                                                 struct hl_cs, mirror_node);
371
372                 if (next && !next->tdr_active) {
373                         next->tdr_active = true;
374                         schedule_delayed_work(&next->work_tdr,
375                                                 hdev->timeout_jiffies);
376                 }
377
378                 spin_unlock(&hdev->cs_mirror_lock);
379         }
380
381 out:
382         /* Must be called before hl_ctx_put because inside we use ctx to get
383          * the device
384          */
385         hl_debugfs_remove_cs(cs);
386
387         hl_ctx_put(cs->ctx);
388
389         /* We need to mark an error for a CS that was not submitted because in
390          * that case the hl fence release flow is different. Mainly, we don't
391          * need to handle the hw_sob for signal/wait
392          */
393         if (cs->timedout)
394                 cs->fence->error = -ETIMEDOUT;
395         else if (cs->aborted)
396                 cs->fence->error = -EIO;
397         else if (!cs->submitted)
398                 cs->fence->error = -EBUSY;
399
400         if (cs->timestamp)
401                 cs->fence->timestamp = ktime_get();
402         complete_all(&cs->fence->completion);
403         hl_fence_put(cs->fence);
404
405         kfree(cs->jobs_in_queue_cnt);
406         kfree(cs);
407 }
408
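/*
 * cs_timedout - TDR (timeout detection and recovery) work function
 *
 * Invoked from the delayed work when a CS did not complete within the
 * configured timeout. Marks the CS as timed out and resets the device if
 * reset_on_lockup is set, otherwise only flags that a reset is needed.
 */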
409 static void cs_timedout(struct work_struct *work)
410 {
411         struct hl_device *hdev;
412         int rc;
413         struct hl_cs *cs = container_of(work, struct hl_cs,
414                                                  work_tdr.work);
415         rc = cs_get_unless_zero(cs);
416         if (!rc)
417                 return;
418
419         if ((!cs->submitted) || (cs->completed)) {
420                 cs_put(cs);
421                 return;
422         }
423
424         /* Mark that the CS timed out so we won't try to cancel its TDR */
425         cs->timedout = true;
426
427         hdev = cs->ctx->hdev;
428
429         switch (cs->type) {
430         case CS_TYPE_SIGNAL:
431                 dev_err(hdev->dev,
432                         "Signal command submission %llu has not finished in time!\n",
433                         cs->sequence);
434                 break;
435
436         case CS_TYPE_WAIT:
437                 dev_err(hdev->dev,
438                         "Wait command submission %llu has not finished in time!\n",
439                         cs->sequence);
440                 break;
441
442         case CS_TYPE_COLLECTIVE_WAIT:
443                 dev_err(hdev->dev,
444                         "Collective Wait command submission %llu has not finished in time!\n",
445                         cs->sequence);
446                 break;
447
448         default:
449                 dev_err(hdev->dev,
450                         "Command submission %llu has not finished in time!\n",
451                         cs->sequence);
452                 break;
453         }
454
455         cs_put(cs);
456
457         if (hdev->reset_on_lockup)
458                 hl_device_reset(hdev, false, false);
459         else
460                 hdev->needs_reset = true;
461 }
462
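/*
 * allocate_cs - allocate a new command submission object
 *
 * Allocates the CS and its completion fence, verifies that the maximum
 * number of in-flight CS for the context was not reached, and registers the
 * fence in the context's pending array under the next sequence number.
 */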
463 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
464                         enum hl_cs_type cs_type, struct hl_cs **cs_new)
465 {
466         struct hl_cs_counters_atomic *cntr;
467         struct hl_fence *other = NULL;
468         struct hl_cs_compl *cs_cmpl;
469         struct hl_cs *cs;
470         int rc;
471
472         cntr = &hdev->aggregated_cs_counters;
473
474         cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
475         if (!cs) {
476                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
477                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
478                 return -ENOMEM;
479         }
480
481         cs->ctx = ctx;
482         cs->submitted = false;
483         cs->completed = false;
484         cs->type = cs_type;
485         INIT_LIST_HEAD(&cs->job_list);
486         INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
487         kref_init(&cs->refcount);
488         spin_lock_init(&cs->job_lock);
489
490         cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
491         if (!cs_cmpl) {
492                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
493                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
494                 rc = -ENOMEM;
495                 goto free_cs;
496         }
497
498         cs_cmpl->hdev = hdev;
499         cs_cmpl->type = cs->type;
500         spin_lock_init(&cs_cmpl->lock);
501         cs->fence = &cs_cmpl->base_fence;
502
503         spin_lock(&ctx->cs_lock);
504
505         cs_cmpl->cs_seq = ctx->cs_sequence;
506         other = ctx->cs_pending[cs_cmpl->cs_seq &
507                                 (hdev->asic_prop.max_pending_cs - 1)];
508
509         if (other && !completion_done(&other->completion)) {
510                 dev_dbg_ratelimited(hdev->dev,
511                         "Rejecting CS because of too many in-flight CS\n");
512                 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
513                 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
514                 rc = -EAGAIN;
515                 goto free_fence;
516         }
517
518         cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
519                         sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
520         if (!cs->jobs_in_queue_cnt) {
521                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
522                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
523                 rc = -ENOMEM;
524                 goto free_fence;
525         }
526
527         /* init hl_fence */
528         hl_fence_init(&cs_cmpl->base_fence);
529
530         cs->sequence = cs_cmpl->cs_seq;
531
532         ctx->cs_pending[cs_cmpl->cs_seq &
533                         (hdev->asic_prop.max_pending_cs - 1)] =
534                                                         &cs_cmpl->base_fence;
535         ctx->cs_sequence++;
536
537         hl_fence_get(&cs_cmpl->base_fence);
538
539         hl_fence_put(other);
540
541         spin_unlock(&ctx->cs_lock);
542
543         *cs_new = cs;
544
545         return 0;
546
547 free_fence:
548         spin_unlock(&ctx->cs_lock);
549         kfree(cs_cmpl);
550 free_cs:
551         kfree(cs);
552         return rc;
553 }
554
555 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
556 {
557         struct hl_cs_job *job, *tmp;
558
559         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
560                 complete_job(hdev, job);
561 }
562
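/*
 * hl_cs_rollback_all - abort all command submissions that are still in-flight
 *
 * Flushes the completion workqueues and then aborts and rolls back every CS
 * that is still on the CS mirror list.
 */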
563 void hl_cs_rollback_all(struct hl_device *hdev)
564 {
565         int i;
566         struct hl_cs *cs, *tmp;
567
568         /* flush all completions */
569         for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
570                 flush_workqueue(hdev->cq_wq[i]);
571
572         /* Make sure we don't have leftovers in the CS mirror list */
573         list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
574                 cs_get(cs);
575                 cs->aborted = true;
576                 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
577                                         cs->ctx->asid, cs->sequence);
578                 cs_rollback(hdev, cs);
579                 cs_put(cs);
580         }
581 }
582
583 static void job_wq_completion(struct work_struct *work)
584 {
585         struct hl_cs_job *job = container_of(work, struct hl_cs_job,
586                                                 finish_work);
587         struct hl_cs *cs = job->cs;
588         struct hl_device *hdev = cs->ctx->hdev;
589
590         /* job is no longer needed */
591         complete_job(hdev, job);
592 }
593
594 static int validate_queue_index(struct hl_device *hdev,
595                                 struct hl_cs_chunk *chunk,
596                                 enum hl_queue_type *queue_type,
597                                 bool *is_kernel_allocated_cb)
598 {
599         struct asic_fixed_properties *asic = &hdev->asic_prop;
600         struct hw_queue_properties *hw_queue_prop;
601
602         /* This must be checked here to prevent out-of-bounds access to
603          * hw_queues_props array
604          */
605         if (chunk->queue_index >= asic->max_queues) {
606                 dev_err(hdev->dev, "Queue index %d is invalid\n",
607                         chunk->queue_index);
608                 return -EINVAL;
609         }
610
611         hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
612
613         if (hw_queue_prop->type == QUEUE_TYPE_NA) {
614                 dev_err(hdev->dev, "Queue index %d is invalid\n",
615                         chunk->queue_index);
616                 return -EINVAL;
617         }
618
619         if (hw_queue_prop->driver_only) {
620                 dev_err(hdev->dev,
621                         "Queue index %d is restricted for the kernel driver\n",
622                         chunk->queue_index);
623                 return -EINVAL;
624         }
625
626         /* When the hw queue type isn't QUEUE_TYPE_HW, the
627          * USER_ALLOC_CB flag shall be treated as "don't care".
628          */
629         if (hw_queue_prop->type == QUEUE_TYPE_HW) {
630                 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
631                         if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
632                                 dev_err(hdev->dev,
633                                         "Queue index %d doesn't support user CB\n",
634                                         chunk->queue_index);
635                                 return -EINVAL;
636                         }
637
638                         *is_kernel_allocated_cb = false;
639                 } else {
640                         if (!(hw_queue_prop->cb_alloc_flags &
641                                         CB_ALLOC_KERNEL)) {
642                                 dev_err(hdev->dev,
643                                         "Queue index %d doesn't support kernel CB\n",
644                                         chunk->queue_index);
645                                 return -EINVAL;
646                         }
647
648                         *is_kernel_allocated_cb = true;
649                 }
650         } else {
651                 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
652                                                 & CB_ALLOC_KERNEL);
653         }
654
655         *queue_type = hw_queue_prop->type;
656         return 0;
657 }
658
659 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
660                                         struct hl_cb_mgr *cb_mgr,
661                                         struct hl_cs_chunk *chunk)
662 {
663         struct hl_cb *cb;
664         u32 cb_handle;
665
666         cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
667
668         cb = hl_cb_get(hdev, cb_mgr, cb_handle);
669         if (!cb) {
670                 dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
671                 return NULL;
672         }
673
674         if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
675                 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
676                 goto release_cb;
677         }
678
679         atomic_inc(&cb->cs_cnt);
680
681         return cb;
682
683 release_cb:
684         hl_cb_put(cb);
685         return NULL;
686 }
687
688 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
689                 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
690 {
691         struct hl_cs_job *job;
692
693         job = kzalloc(sizeof(*job), GFP_ATOMIC);
694         if (!job)
695                 return NULL;
696
697         kref_init(&job->refcount);
698         job->queue_type = queue_type;
699         job->is_kernel_allocated_cb = is_kernel_allocated_cb;
700
701         if (is_cb_patched(hdev, job))
702                 INIT_LIST_HEAD(&job->userptr_list);
703
704         if (job->queue_type == QUEUE_TYPE_EXT)
705                 INIT_WORK(&job->finish_work, job_wq_completion);
706
707         return job;
708 }
709
710 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
711 {
712         if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
713                 return CS_TYPE_SIGNAL;
714         else if (cs_type_flags & HL_CS_FLAGS_WAIT)
715                 return CS_TYPE_WAIT;
716         else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
717                 return CS_TYPE_COLLECTIVE_WAIT;
718         else
719                 return CS_TYPE_DEFAULT;
720 }
721
722 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
723 {
724         struct hl_device *hdev = hpriv->hdev;
725         struct hl_ctx *ctx = hpriv->ctx;
726         u32 cs_type_flags, num_chunks;
727         enum hl_device_status status;
728         enum hl_cs_type cs_type;
729
730         if (!hl_device_operational(hdev, &status)) {
731                 dev_warn_ratelimited(hdev->dev,
732                         "Device is %s. Can't submit new CS\n",
733                         hdev->status[status]);
734                 return -EBUSY;
735         }
736
737         cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
738
739         if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
740                 dev_err(hdev->dev,
741                         "CS type flags are mutually exclusive, context %d\n",
742                         ctx->asid);
743                 return -EINVAL;
744         }
745
746         cs_type = hl_cs_get_cs_type(cs_type_flags);
747         num_chunks = args->in.num_chunks_execute;
748
749         if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
750                                         !hdev->supports_sync_stream)) {
751                 dev_err(hdev->dev, "Sync stream CS is not supported\n");
752                 return -EINVAL;
753         }
754
755         if (cs_type == CS_TYPE_DEFAULT) {
756                 if (!num_chunks) {
757                         dev_err(hdev->dev,
758                                 "Got execute CS with 0 chunks, context %d\n",
759                                 ctx->asid);
760                         return -EINVAL;
761                 }
762         } else if (num_chunks != 1) {
763                 dev_err(hdev->dev,
764                         "Sync stream CS mandates one chunk only, context %d\n",
765                         ctx->asid);
766                 return -EINVAL;
767         }
768
769         return 0;
770 }
771
772 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
773                                         struct hl_cs_chunk **cs_chunk_array,
774                                         void __user *chunks, u32 num_chunks,
775                                         struct hl_ctx *ctx)
776 {
777         u32 size_to_copy;
778
779         if (num_chunks > HL_MAX_JOBS_PER_CS) {
780                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
781                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
782                 dev_err(hdev->dev,
783                         "Number of chunks can NOT be larger than %d\n",
784                         HL_MAX_JOBS_PER_CS);
785                 return -EINVAL;
786         }
787
788         *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
789                                         GFP_ATOMIC);
790         if (!*cs_chunk_array) {
791                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
792                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
793                 return -ENOMEM;
794         }
795
796         size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
797         if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
798                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
799                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
800                 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
801                 kfree(*cs_chunk_array);
802                 return -EFAULT;
803         }
804
805         return 0;
806 }
807
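/*
 * cs_ioctl_default - handle a regular (execute) command submission
 *
 * Copies the chunk array from the user, builds a job per chunk, runs the
 * parser on each job and finally schedules the CS on the H/W queues. The
 * CS must contain at least one job on an external or H/W queue.
 */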
808 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
809                                 u32 num_chunks, u64 *cs_seq, bool timestamp)
810 {
811         bool int_queues_only = true;
812         struct hl_device *hdev = hpriv->hdev;
813         struct hl_cs_chunk *cs_chunk_array;
814         struct hl_cs_counters_atomic *cntr;
815         struct hl_ctx *ctx = hpriv->ctx;
816         struct hl_cs_job *job;
817         struct hl_cs *cs;
818         struct hl_cb *cb;
819         int rc, i;
820
821         cntr = &hdev->aggregated_cs_counters;
822         *cs_seq = ULLONG_MAX;
823
824         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
825                         hpriv->ctx);
826         if (rc)
827                 goto out;
828
829         /* increment refcnt for context */
830         hl_ctx_get(hdev, hpriv->ctx);
831
832         rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
833         if (rc) {
834                 hl_ctx_put(hpriv->ctx);
835                 goto free_cs_chunk_array;
836         }
837
838         cs->timestamp = !!timestamp;
839         *cs_seq = cs->sequence;
840
841         hl_debugfs_add_cs(cs);
842
843         /* Validate ALL the CS chunks before submitting the CS */
844         for (i = 0 ; i < num_chunks ; i++) {
845                 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
846                 enum hl_queue_type queue_type;
847                 bool is_kernel_allocated_cb;
848
849                 rc = validate_queue_index(hdev, chunk, &queue_type,
850                                                 &is_kernel_allocated_cb);
851                 if (rc) {
852                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
853                         atomic64_inc(&cntr->validation_drop_cnt);
854                         goto free_cs_object;
855                 }
856
857                 if (is_kernel_allocated_cb) {
858                         cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
859                         if (!cb) {
860                                 atomic64_inc(
861                                         &ctx->cs_counters.validation_drop_cnt);
862                                 atomic64_inc(&cntr->validation_drop_cnt);
863                                 rc = -EINVAL;
864                                 goto free_cs_object;
865                         }
866                 } else {
867                         cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
868                 }
869
870                 if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
871                         int_queues_only = false;
872
873                 job = hl_cs_allocate_job(hdev, queue_type,
874                                                 is_kernel_allocated_cb);
875                 if (!job) {
876                         atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
877                         atomic64_inc(&cntr->out_of_mem_drop_cnt);
878                         dev_err(hdev->dev, "Failed to allocate a new job\n");
879                         rc = -ENOMEM;
880                         if (is_kernel_allocated_cb)
881                                 goto release_cb;
882
883                         goto free_cs_object;
884                 }
885
886                 job->id = i + 1;
887                 job->cs = cs;
888                 job->user_cb = cb;
889                 job->user_cb_size = chunk->cb_size;
890                 job->hw_queue_id = chunk->queue_index;
891
892                 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
893
894                 list_add_tail(&job->cs_node, &cs->job_list);
895
896                 /*
897                  * Increment the CS reference. When the CS reference is 0, the CS
898                  * is done and can be signaled to the user and all its resources freed.
899                  * Only increment for jobs on external or H/W queues, because only
900                  * for those jobs do we get a completion.
901                  */
902                 if (job->queue_type == QUEUE_TYPE_EXT ||
903                                 job->queue_type == QUEUE_TYPE_HW)
904                         cs_get(cs);
905
906                 hl_debugfs_add_job(hdev, job);
907
908                 rc = cs_parser(hpriv, job);
909                 if (rc) {
910                         atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
911                         atomic64_inc(&cntr->parsing_drop_cnt);
912                         dev_err(hdev->dev,
913                                 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
914                                 cs->ctx->asid, cs->sequence, job->id, rc);
915                         goto free_cs_object;
916                 }
917         }
918
919         if (int_queues_only) {
920                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
921                 atomic64_inc(&cntr->validation_drop_cnt);
922                 dev_err(hdev->dev,
923                         "Reject CS %d.%llu because only internal queues jobs are present\n",
924                         cs->ctx->asid, cs->sequence);
925                 rc = -EINVAL;
926                 goto free_cs_object;
927         }
928
929         rc = hl_hw_queue_schedule_cs(cs);
930         if (rc) {
931                 if (rc != -EAGAIN)
932                         dev_err(hdev->dev,
933                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
934                                 cs->ctx->asid, cs->sequence, rc);
935                 goto free_cs_object;
936         }
937
938         rc = HL_CS_STATUS_SUCCESS;
939         goto put_cs;
940
941 release_cb:
942         atomic_dec(&cb->cs_cnt);
943         hl_cb_put(cb);
944 free_cs_object:
945         cs_rollback(hdev, cs);
946         *cs_seq = ULLONG_MAX;
947         /* The path below is both for good and erroneous exits */
948 put_cs:
949         /* We finished with the CS in this function, so put the ref */
950         cs_put(cs);
951 free_cs_chunk_array:
952         kfree(cs_chunk_array);
953 out:
954         return rc;
955 }
956
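/*
 * hl_cs_ctx_switch - perform the context-switch and restore phase if needed
 *
 * The first thread to submit on a context (or a forced restore) performs the
 * ASIC context-switch and submits the restore CS, then waits for it to
 * complete before the execute phase is allowed to run.
 */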
957 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
958                                 u64 *cs_seq)
959 {
960         struct hl_device *hdev = hpriv->hdev;
961         struct hl_ctx *ctx = hpriv->ctx;
962         bool need_soft_reset = false;
963         int rc = 0, do_ctx_switch;
964         void __user *chunks;
965         u32 num_chunks, tmp;
966         int ret;
967
968         do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
969
970         if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
971                 mutex_lock(&hpriv->restore_phase_mutex);
972
973                 if (do_ctx_switch) {
974                         rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
975                         if (rc) {
976                                 dev_err_ratelimited(hdev->dev,
977                                         "Failed to switch to context %d, rejecting CS! %d\n",
978                                         ctx->asid, rc);
979                                 /*
980                                  * If we timed out, or if the device is not IDLE
981                                  * while we want to do a context-switch (-EBUSY),
982                                  * we need to soft-reset because the QMAN is
983                                  * probably stuck. However, we can't call the
984                                  * reset here directly because of a deadlock, so
985                                  * we need to do it at the very end of this
986                                  * function
987                                  */
988                                 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
989                                         need_soft_reset = true;
990                                 mutex_unlock(&hpriv->restore_phase_mutex);
991                                 goto out;
992                         }
993                 }
994
995                 hdev->asic_funcs->restore_phase_topology(hdev);
996
997                 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
998                 num_chunks = args->in.num_chunks_restore;
999
1000                 if (!num_chunks) {
1001                         dev_dbg(hdev->dev,
1002                                 "Need to run restore phase but restore CS is empty\n");
1003                         rc = 0;
1004                 } else {
1005                         rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1006                                                 cs_seq, false);
1007                 }
1008
1009                 mutex_unlock(&hpriv->restore_phase_mutex);
1010
1011                 if (rc) {
1012                         dev_err(hdev->dev,
1013                                 "Failed to submit restore CS for context %d (%d)\n",
1014                                 ctx->asid, rc);
1015                         goto out;
1016                 }
1017
1018                 /* Need to wait for restore completion before execution phase */
1019                 if (num_chunks) {
1020                         enum hl_cs_wait_status status;
1021 wait_again:
1022                         ret = _hl_cs_wait_ioctl(hdev, ctx,
1023                                         jiffies_to_usecs(hdev->timeout_jiffies),
1024                                         *cs_seq, &status, NULL);
1025                         if (ret) {
1026                                 if (ret == -ERESTARTSYS) {
1027                                         usleep_range(100, 200);
1028                                         goto wait_again;
1029                                 }
1030
1031                                 dev_err(hdev->dev,
1032                                         "Restore CS for context %d failed to complete %d\n",
1033                                         ctx->asid, ret);
1034                                 rc = -ENOEXEC;
1035                                 goto out;
1036                         }
1037                 }
1038
1039                 ctx->thread_ctx_switch_wait_token = 1;
1040
1041         } else if (!ctx->thread_ctx_switch_wait_token) {
1042                 rc = hl_poll_timeout_memory(hdev,
1043                         &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1044                         100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1045
1046                 if (rc == -ETIMEDOUT) {
1047                         dev_err(hdev->dev,
1048                                 "context switch phase timeout (%d)\n", tmp);
1049                         goto out;
1050                 }
1051         }
1052
1053 out:
1054         if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1055                 hl_device_reset(hdev, false, false);
1056
1057         return rc;
1058 }
1059
1060 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1061                 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
1062 {
1063         u64 *signal_seq_arr = NULL;
1064         u32 size_to_copy, signal_seq_arr_len;
1065         int rc = 0;
1066
1067         signal_seq_arr_len = chunk->num_signal_seq_arr;
1068
1069         /* currently only one signal seq is supported */
1070         if (signal_seq_arr_len != 1) {
1071                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1072                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1073                 dev_err(hdev->dev,
1074                         "Wait for signal CS supports only one signal CS seq\n");
1075                 return -EINVAL;
1076         }
1077
1078         signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1079                                         sizeof(*signal_seq_arr),
1080                                         GFP_ATOMIC);
1081         if (!signal_seq_arr) {
1082                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1083                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1084                 return -ENOMEM;
1085         }
1086
1087         size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
1088         if (copy_from_user(signal_seq_arr,
1089                                 u64_to_user_ptr(chunk->signal_seq_arr),
1090                                 size_to_copy)) {
1091                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1092                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1093                 dev_err(hdev->dev,
1094                         "Failed to copy signal seq array from user\n");
1095                 rc = -EFAULT;
1096                 goto out;
1097         }
1098
1099         /* currently it is guaranteed to have only one signal seq */
1100         *signal_seq = signal_seq_arr[0];
1101
1102 out:
1103         kfree(signal_seq_arr);
1104
1105         return rc;
1106 }
1107
1108 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1109                 struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
1110                 u32 q_idx)
1111 {
1112         struct hl_cs_counters_atomic *cntr;
1113         struct hl_cs_job *job;
1114         struct hl_cb *cb;
1115         u32 cb_size;
1116
1117         cntr = &hdev->aggregated_cs_counters;
1118
1119         job = hl_cs_allocate_job(hdev, q_type, true);
1120         if (!job) {
1121                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1122                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1123                 dev_err(hdev->dev, "Failed to allocate a new job\n");
1124                 return -ENOMEM;
1125         }
1126
1127         if (cs->type == CS_TYPE_WAIT)
1128                 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1129         else
1130                 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1131
1132         cb = hl_cb_kernel_create(hdev, cb_size,
1133                                 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1134         if (!cb) {
1135                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1136                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1137                 kfree(job);
1138                 return -EFAULT;
1139         }
1140
1141         job->id = 0;
1142         job->cs = cs;
1143         job->user_cb = cb;
1144         atomic_inc(&job->user_cb->cs_cnt);
1145         job->user_cb_size = cb_size;
1146         job->hw_queue_id = q_idx;
1147
1148         /*
1149          * There is no need for parsing, the user CB is the patched CB.
1150          * We call hl_cb_destroy() for two reasons - we don't need the CB in
1151          * the CB idr anymore, and we need to decrement its refcount as it was
1152          * incremented inside hl_cb_kernel_create().
1153          */
1154         job->patched_cb = job->user_cb;
1155         job->job_cb_size = job->user_cb_size;
1156         hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
1157
1158         /* increment refcount as for external queues we get completion */
1159         cs_get(cs);
1160
1161         cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1162
1163         list_add_tail(&job->cs_node, &cs->job_list);
1164
1165         hl_debugfs_add_job(hdev, job);
1166
1167         return 0;
1168 }
1169
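/*
 * cs_ioctl_signal_wait - handle a signal, wait or collective-wait CS
 *
 * Validates the single chunk and the target queue, extracts and looks up the
 * signal fence for the wait flavors, allocates the CS and its kernel-built
 * jobs and schedules it on the H/W queues.
 */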
1170 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
1171                                 void __user *chunks, u32 num_chunks,
1172                                 u64 *cs_seq, bool timestamp)
1173 {
1174         struct hl_cs_chunk *cs_chunk_array, *chunk;
1175         struct hw_queue_properties *hw_queue_prop;
1176         struct hl_device *hdev = hpriv->hdev;
1177         struct hl_cs_compl *sig_waitcs_cmpl;
1178         u32 q_idx, collective_engine_id = 0;
1179         struct hl_cs_counters_atomic *cntr;
1180         struct hl_fence *sig_fence = NULL;
1181         struct hl_ctx *ctx = hpriv->ctx;
1182         enum hl_queue_type q_type;
1183         struct hl_cs *cs;
1184         u64 signal_seq;
1185         int rc;
1186
1187         cntr = &hdev->aggregated_cs_counters;
1188         *cs_seq = ULLONG_MAX;
1189
1190         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1191                         ctx);
1192         if (rc)
1193                 goto out;
1194
1195         /* currently it is guaranteed to have only one chunk */
1196         chunk = &cs_chunk_array[0];
1197
1198         if (chunk->queue_index >= hdev->asic_prop.max_queues) {
1199                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1200                 atomic64_inc(&cntr->validation_drop_cnt);
1201                 dev_err(hdev->dev, "Queue index %d is invalid\n",
1202                         chunk->queue_index);
1203                 rc = -EINVAL;
1204                 goto free_cs_chunk_array;
1205         }
1206
1207         q_idx = chunk->queue_index;
1208         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1209         q_type = hw_queue_prop->type;
1210
1211         if (!hw_queue_prop->supports_sync_stream) {
1212                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1213                 atomic64_inc(&cntr->validation_drop_cnt);
1214                 dev_err(hdev->dev,
1215                         "Queue index %d does not support sync stream operations\n",
1216                         q_idx);
1217                 rc = -EINVAL;
1218                 goto free_cs_chunk_array;
1219         }
1220
1221         if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
1222                 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
1223                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1224                         atomic64_inc(&cntr->validation_drop_cnt);
1225                         dev_err(hdev->dev,
1226                                 "Queue index %d is invalid\n", q_idx);
1227                         rc = -EINVAL;
1228                         goto free_cs_chunk_array;
1229                 }
1230
1231                 collective_engine_id = chunk->collective_engine_id;
1232         }
1233
1234         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
1235                 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
1236                 if (rc)
1237                         goto free_cs_chunk_array;
1238
1239                 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
1240                 if (IS_ERR(sig_fence)) {
1241                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1242                         atomic64_inc(&cntr->validation_drop_cnt);
1243                         dev_err(hdev->dev,
1244                                 "Failed to get signal CS with seq 0x%llx\n",
1245                                 signal_seq);
1246                         rc = PTR_ERR(sig_fence);
1247                         goto free_cs_chunk_array;
1248                 }
1249
1250                 if (!sig_fence) {
1251                         /* signal CS already finished */
1252                         rc = 0;
1253                         goto free_cs_chunk_array;
1254                 }
1255
1256                 sig_waitcs_cmpl =
1257                         container_of(sig_fence, struct hl_cs_compl, base_fence);
1258
1259                 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
1260                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1261                         atomic64_inc(&cntr->validation_drop_cnt);
1262                         dev_err(hdev->dev,
1263                                 "CS seq 0x%llx is not of a signal CS\n",
1264                                 signal_seq);
1265                         hl_fence_put(sig_fence);
1266                         rc = -EINVAL;
1267                         goto free_cs_chunk_array;
1268                 }
1269
1270                 if (completion_done(&sig_fence->completion)) {
1271                         /* signal CS already finished */
1272                         hl_fence_put(sig_fence);
1273                         rc = 0;
1274                         goto free_cs_chunk_array;
1275                 }
1276         }
1277
1278         /* increment refcnt for context */
1279         hl_ctx_get(hdev, ctx);
1280
1281         rc = allocate_cs(hdev, ctx, cs_type, &cs);
1282         if (rc) {
1283                 if (cs_type == CS_TYPE_WAIT ||
1284                         cs_type == CS_TYPE_COLLECTIVE_WAIT)
1285                         hl_fence_put(sig_fence);
1286                 hl_ctx_put(ctx);
1287                 goto free_cs_chunk_array;
1288         }
1289
1290         cs->timestamp = !!timestamp;
1291
1292         /*
1293          * Save the signal CS fence for later initialization right before
1294          * hanging the wait CS on the queue.
1295          */
1296         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
1297                 cs->signal_fence = sig_fence;
1298
1299         hl_debugfs_add_cs(cs);
1300
1301         *cs_seq = cs->sequence;
1302
1303         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
1304                 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
1305                                 q_idx);
1306         else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
1307                 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
1308                                 cs, q_idx, collective_engine_id);
1309         else {
1310                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1311                 atomic64_inc(&cntr->validation_drop_cnt);
1312                 rc = -EINVAL;
1313         }
1314
1315         if (rc)
1316                 goto free_cs_object;
1317
1318         rc = hl_hw_queue_schedule_cs(cs);
1319         if (rc) {
1320                 if (rc != -EAGAIN)
1321                         dev_err(hdev->dev,
1322                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1323                                 ctx->asid, cs->sequence, rc);
1324                 goto free_cs_object;
1325         }
1326
1327         rc = HL_CS_STATUS_SUCCESS;
1328         goto put_cs;
1329
1330 free_cs_object:
1331         cs_rollback(hdev, cs);
1332         *cs_seq = ULLONG_MAX;
1333         /* The path below is both for good and erroneous exits */
1334 put_cs:
1335         /* We finished with the CS in this function, so put the ref */
1336         cs_put(cs);
1337 free_cs_chunk_array:
1338         kfree(cs_chunk_array);
1339 out:
1340         return rc;
1341 }
1342
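/*
 * hl_cs_ioctl - main entry point of the CS IOCTL
 *
 * Performs sanity checks, runs the context-switch/restore phase if required
 * and then dispatches to the default or signal/wait submission flow. The
 * resulting status and sequence number are returned to the user.
 */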
1343 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
1344 {
1345         union hl_cs_args *args = data;
1346         enum hl_cs_type cs_type;
1347         u64 cs_seq = ULLONG_MAX;
1348         void __user *chunks;
1349         u32 num_chunks;
1350         int rc;
1351
1352         rc = hl_cs_sanity_checks(hpriv, args);
1353         if (rc)
1354                 goto out;
1355
1356         rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
1357         if (rc)
1358                 goto out;
1359
1360         cs_type = hl_cs_get_cs_type(args->in.cs_flags &
1361                                         ~HL_CS_FLAGS_FORCE_RESTORE);
1362         chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
1363         num_chunks = args->in.num_chunks_execute;
1364
1365         switch (cs_type) {
1366         case CS_TYPE_SIGNAL:
1367         case CS_TYPE_WAIT:
1368         case CS_TYPE_COLLECTIVE_WAIT:
1369                 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
1370                         &cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
1371                 break;
1372         default:
1373                 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
1374                                 args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
1375                 break;
1376         }
1377
1378 out:
1379         if (rc != -EAGAIN) {
1380                 memset(args, 0, sizeof(*args));
1381                 args->out.status = rc;
1382                 args->out.seq = cs_seq;
1383         }
1384
1385         return rc;
1386 }
1387
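/*
 * _hl_cs_wait_ioctl - wait for a command submission to complete
 *
 * Looks up the fence of the given sequence number and waits on it for up to
 * timeout_us microseconds. Reports whether the CS is busy, completed or
 * already gone, and optionally returns the completion timestamp.
 */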
1388 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
1389                                 u64 timeout_us, u64 seq,
1390                                 enum hl_cs_wait_status *status, s64 *timestamp)
1391 {
1392         struct hl_fence *fence;
1393         unsigned long timeout;
1394         int rc = 0;
1395         long completion_rc;
1396
1397         if (timestamp)
1398                 *timestamp = 0;
1399
1400         if (timeout_us == MAX_SCHEDULE_TIMEOUT)
1401                 timeout = timeout_us;
1402         else
1403                 timeout = usecs_to_jiffies(timeout_us);
1404
1405         hl_ctx_get(hdev, ctx);
1406
1407         fence = hl_ctx_get_fence(ctx, seq);
1408         if (IS_ERR(fence)) {
1409                 rc = PTR_ERR(fence);
1410                 if (rc == -EINVAL)
1411                         dev_notice_ratelimited(hdev->dev,
1412                                 "Can't wait on CS %llu because current CS is at seq %llu\n",
1413                                 seq, ctx->cs_sequence);
1414         } else if (fence) {
1415                 if (!timeout_us)
1416                         completion_rc = completion_done(&fence->completion);
1417                 else
1418                         completion_rc =
1419                                 wait_for_completion_interruptible_timeout(
1420                                         &fence->completion, timeout);
1421
1422                 if (completion_rc > 0) {
1423                         *status = CS_WAIT_STATUS_COMPLETED;
1424                         if (timestamp)
1425                                 *timestamp = ktime_to_ns(fence->timestamp);
1426                 } else {
1427                         *status = CS_WAIT_STATUS_BUSY;
1428                 }
1429
1430                 if (fence->error == -ETIMEDOUT)
1431                         rc = -ETIMEDOUT;
1432                 else if (fence->error == -EIO)
1433                         rc = -EIO;
1434
1435                 hl_fence_put(fence);
1436         } else {
1437                 dev_dbg(hdev->dev,
1438                         "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
1439                         seq, ctx->cs_sequence);
1440                 *status = CS_WAIT_STATUS_GONE;
1441         }
1442
1443         hl_ctx_put(ctx);
1444
1445         return rc;
1446 }
1447
1448 int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
1449 {
1450         struct hl_device *hdev = hpriv->hdev;
1451         union hl_wait_cs_args *args = data;
1452         enum hl_cs_wait_status status;
1453         u64 seq = args->in.seq;
1454         s64 timestamp;
1455         int rc;
1456
1457         rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
1458                                 &status, &timestamp);
1459
1460         memset(args, 0, sizeof(*args));
1461
1462         if (rc) {
1463                 if (rc == -ERESTARTSYS) {
1464                         dev_err_ratelimited(hdev->dev,
1465                                 "user process got signal while waiting for CS handle %llu\n",
1466                                 seq);
1467                         args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
1468                         rc = -EINTR;
1469                 } else if (rc == -ETIMEDOUT) {
1470                         dev_err_ratelimited(hdev->dev,
1471                                 "CS %llu has timed-out while user process is waiting for it\n",
1472                                 seq);
1473                         args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
1474                 } else if (rc == -EIO) {
1475                         dev_err_ratelimited(hdev->dev,
1476                                 "CS %llu has been aborted while user process is waiting for it\n",
1477                                 seq);
1478                         args->out.status = HL_WAIT_CS_STATUS_ABORTED;
1479                 }
1480                 return rc;
1481         }
1482
1483         if (timestamp) {
1484                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
1485                 args->out.timestamp_nsec = timestamp;
1486         }
1487
1488         switch (status) {
1489         case CS_WAIT_STATUS_GONE:
1490                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
1491                 fallthrough;
1492         case CS_WAIT_STATUS_COMPLETED:
1493                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
1494                 break;
1495         case CS_WAIT_STATUS_BUSY:
1496         default:
1497                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
1498                 break;
1499         }
1500
1501         return 0;
1502 }