drivers/misc/habanalabs/common/command_submission.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2019 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13
14 #define HL_CS_FLAGS_TYPE_MASK   (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15                                 HL_CS_FLAGS_COLLECTIVE_WAIT)
16
17 /**
18  * enum hl_cs_wait_status - cs wait status
19  * @CS_WAIT_STATUS_BUSY: cs was not completed yet
20  * @CS_WAIT_STATUS_COMPLETED: cs completed
21  * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
22  */
23 enum hl_cs_wait_status {
24         CS_WAIT_STATUS_BUSY,
25         CS_WAIT_STATUS_COMPLETED,
26         CS_WAIT_STATUS_GONE
27 };
28
29 static void job_wq_completion(struct work_struct *work);
30 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
31                                 u64 timeout_us, u64 seq,
32                                 enum hl_cs_wait_status *status, s64 *timestamp);
33 static void cs_do_release(struct kref *ref);
34
35 static void hl_sob_reset(struct kref *ref)
36 {
37         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
38                                                         kref);
39         struct hl_device *hdev = hw_sob->hdev;
40
41         dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
42
43         hdev->asic_funcs->reset_sob(hdev, hw_sob);
44
45         hw_sob->need_reset = false;
46 }
47
48 void hl_sob_reset_error(struct kref *ref)
49 {
50         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
51                                                         kref);
52         struct hl_device *hdev = hw_sob->hdev;
53
54         dev_crit(hdev->dev,
55                 "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
56                 hw_sob->q_idx, hw_sob->sob_id);
57 }
58
59 void hw_sob_put(struct hl_hw_sob *hw_sob)
60 {
61         if (hw_sob)
62                 kref_put(&hw_sob->kref, hl_sob_reset);
63 }
64
65 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
66 {
67         if (hw_sob)
68                 kref_put(&hw_sob->kref, hl_sob_reset_error);
69 }
70
71 void hw_sob_get(struct hl_hw_sob *hw_sob)
72 {
73         if (hw_sob)
74                 kref_get(&hw_sob->kref);
75 }
76
77 /**
78  * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
79  * @sob_base: sob base id
80  * @sob_mask: sob user mask, each bit represents a sob offset from sob base
81  * @mask: generated mask
82  *
83  * Return: 0 if given parameters are valid
84  */
85 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
86 {
87         int i;
88
89         if (sob_mask == 0)
90                 return -EINVAL;
91
92         if (sob_mask == 0x1) {
93                 *mask = ~(1 << (sob_base & 0x7));
94         } else {
95                 /* find msb in order to verify sob range is valid */
96                 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
97                         if (BIT(i) & sob_mask)
98                                 break;
99
100                 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
101                         return -EINVAL;
102
103                 *mask = ~sob_mask;
104         }
105
106         return 0;
107 }
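/*
 * Editor's note: a worked example of the mask math above (illustrative only,
 * assuming HL_MAX_SOBS_PER_MONITOR is 8):
 *
 *   hl_gen_sob_mask(5, 0x1, &mask) -> mask = ~(1 << (5 & 0x7)) = 0xdf
 *   hl_gen_sob_mask(8, 0x7, &mask) -> msb of 0x7 is bit 2, 2 <= (8 - 0 - 1),
 *                                     so mask = ~0x7 = 0xf8
 *   hl_gen_sob_mask(6, 0x7, &mask) -> msb 2 > (8 - 6 - 1) = 1, returns -EINVAL
 */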
108
109 static void hl_fence_release(struct kref *kref)
110 {
111         struct hl_fence *fence =
112                 container_of(kref, struct hl_fence, refcount);
113         struct hl_cs_compl *hl_cs_cmpl =
114                 container_of(fence, struct hl_cs_compl, base_fence);
115
116         kfree(hl_cs_cmpl);
117 }
118
119 void hl_fence_put(struct hl_fence *fence)
120 {
121         if (IS_ERR_OR_NULL(fence))
122                 return;
123         kref_put(&fence->refcount, hl_fence_release);
124 }
125
126 void hl_fences_put(struct hl_fence **fence, int len)
127 {
128         int i;
129
130         for (i = 0; i < len; i++, fence++)
131                 hl_fence_put(*fence);
132 }
133
134 void hl_fence_get(struct hl_fence *fence)
135 {
136         if (fence)
137                 kref_get(&fence->refcount);
138 }
139
140 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
141 {
142         kref_init(&fence->refcount);
143         fence->cs_sequence = sequence;
144         fence->error = 0;
145         fence->timestamp = ktime_set(0, 0);
146         init_completion(&fence->completion);
147 }
148
149 void cs_get(struct hl_cs *cs)
150 {
151         kref_get(&cs->refcount);
152 }
153
154 static int cs_get_unless_zero(struct hl_cs *cs)
155 {
156         return kref_get_unless_zero(&cs->refcount);
157 }
158
159 static void cs_put(struct hl_cs *cs)
160 {
161         kref_put(&cs->refcount, cs_do_release);
162 }
163
164 static void cs_job_do_release(struct kref *ref)
165 {
166         struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
167
168         kfree(job);
169 }
170
171 static void cs_job_put(struct hl_cs_job *job)
172 {
173         kref_put(&job->refcount, cs_job_do_release);
174 }
175
176 bool cs_needs_completion(struct hl_cs *cs)
177 {
178         /* In case this is a staged CS, only the last CS in the sequence should
179          * get a completion; any non-staged CS always gets a completion.
180          */
181         if (cs->staged_cs && !cs->staged_last)
182                 return false;
183
184         return true;
185 }
186
187 bool cs_needs_timeout(struct hl_cs *cs)
188 {
189         /* In case this is a staged CS, only the first CS in the sequence should
190          * get a timeout; any non-staged CS always gets a timeout.
191          */
192         if (cs->staged_cs && !cs->staged_first)
193                 return false;
194
195         return true;
196 }
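/*
 * Editor's note (illustrative, follows from the two helpers above): for a
 * staged submission made of CS #10 (staged_first), #11 and #12 (staged_last),
 * only #10 arms the TDR timeout and only #12 gets a completion; #11 gets
 * neither, and its reference is dropped by #12 during the CS release flow.
 */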
197
198 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
199 {
200         /*
201          * Patched CB is created for external queue jobs, and for H/W queue
202          * jobs if the user CB was allocated by the driver and the MMU is disabled.
203          */
204         return (job->queue_type == QUEUE_TYPE_EXT ||
205                         (job->queue_type == QUEUE_TYPE_HW &&
206                                         job->is_kernel_allocated_cb &&
207                                         !hdev->mmu_enable));
208 }
209
210 /*
211  * cs_parser - parse the user command submission
212  *
213  * @hpriv       : pointer to the private data of the fd
214  * @job        : pointer to the job that holds the command submission info
215  *
216  * The function parses the command submission of the user. It calls the
217  * ASIC specific parser, which returns a list of memory blocks to send
218  * to the device as different command buffers
219  *
220  */
221 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
222 {
223         struct hl_device *hdev = hpriv->hdev;
224         struct hl_cs_parser parser;
225         int rc;
226
227         parser.ctx_id = job->cs->ctx->asid;
228         parser.cs_sequence = job->cs->sequence;
229         parser.job_id = job->id;
230
231         parser.hw_queue_id = job->hw_queue_id;
232         parser.job_userptr_list = &job->userptr_list;
233         parser.patched_cb = NULL;
234         parser.user_cb = job->user_cb;
235         parser.user_cb_size = job->user_cb_size;
236         parser.queue_type = job->queue_type;
237         parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
238         job->patched_cb = NULL;
239         parser.completion = cs_needs_completion(job->cs);
240
241         rc = hdev->asic_funcs->cs_parser(hdev, &parser);
242
243         if (is_cb_patched(hdev, job)) {
244                 if (!rc) {
245                         job->patched_cb = parser.patched_cb;
246                         job->job_cb_size = parser.patched_cb_size;
247                         job->contains_dma_pkt = parser.contains_dma_pkt;
248                         atomic_inc(&job->patched_cb->cs_cnt);
249                 }
250
251                 /*
252                  * Whether the parsing worked or not, we don't need the
253                  * original CB anymore because it was already parsed and
254                  * won't be accessed again for this CS
255                  */
256                 atomic_dec(&job->user_cb->cs_cnt);
257                 hl_cb_put(job->user_cb);
258                 job->user_cb = NULL;
259         } else if (!rc) {
260                 job->job_cb_size = job->user_cb_size;
261         }
262
263         return rc;
264 }
265
266 static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
267 {
268         struct hl_cs *cs = job->cs;
269
270         if (is_cb_patched(hdev, job)) {
271                 hl_userptr_delete_list(hdev, &job->userptr_list);
272
273                 /*
274                  * We might arrive here from a rollback where the patched CB
275                  * wasn't created, so we need to check that it's not NULL
276                  */
277                 if (job->patched_cb) {
278                         atomic_dec(&job->patched_cb->cs_cnt);
279                         hl_cb_put(job->patched_cb);
280                 }
281         }
282
283         /* For H/W queue jobs, if a user CB was allocated by the driver and the
284          * MMU is enabled, the user CB isn't released in cs_parser() and thus
285          * should be released here.
286          * This is also true for INT queue jobs whose CB was allocated by the driver
287          */
288         if (job->is_kernel_allocated_cb &&
289                 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
290                                 job->queue_type == QUEUE_TYPE_INT)) {
291                 atomic_dec(&job->user_cb->cs_cnt);
292                 hl_cb_put(job->user_cb);
293         }
294
295         /*
296          * This is the only place where there can be multiple threads
297          * modifying the list at the same time
298          */
299         spin_lock(&cs->job_lock);
300         list_del(&job->cs_node);
301         spin_unlock(&cs->job_lock);
302
303         hl_debugfs_remove_job(hdev, job);
304
305         /* We decrement reference only for a CS that gets completion
306          * because the reference was incremented only for this kind of CS
307          * right before it was scheduled.
308          *
309          * In staged submission, only the last CS marked as 'staged_last'
310          * gets completion, hence its release function will be called from here.
311          * As for the rest of the CSs in the staged submission, which do not get
312          * completion, their CS reference will be decremented by the
313          * 'staged_last' CS during the CS release flow.
314          * All relevant PQ CI counters will be incremented during the CS release
315          * flow by calling 'hl_hw_queue_update_ci'.
316          */
317         if (cs_needs_completion(cs) &&
318                 (job->queue_type == QUEUE_TYPE_EXT ||
319                         job->queue_type == QUEUE_TYPE_HW))
320                 cs_put(cs);
321
322         cs_job_put(job);
323 }
324
325 /*
326  * hl_staged_cs_find_first - locate the first CS in this staged submission
327  *
328  * @hdev: pointer to device structure
329  * @cs_seq: staged submission sequence number
330  *
331  * @note: This function must be called under 'hdev->cs_mirror_lock'
332  *
333  * Find and return a CS pointer with the given sequence
334  */
335 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
336 {
337         struct hl_cs *cs;
338
339         list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
340                 if (cs->staged_cs && cs->staged_first &&
341                                 cs->sequence == cs_seq)
342                         return cs;
343
344         return NULL;
345 }
346
347 /*
348  * is_staged_cs_last_exists - returns true if the last CS in sequence exists
349  *
350  * @hdev: pointer to device structure
351  * @cs: staged submission member
352  *
353  */
354 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
355 {
356         struct hl_cs *last_entry;
357
358         last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
359                                                                 staged_cs_node);
360
361         if (last_entry->staged_last)
362                 return true;
363
364         return false;
365 }
366
367 /*
368  * staged_cs_get - get CS reference if this CS is a part of a staged CS
369  *
370  * @hdev: pointer to device structure
371  * @cs: current CS
372  * @cs_seq: staged submission sequence number
373  *
374  * Increment CS reference for every CS in this staged submission except for
375  * the CS that gets the completion.
376  */
377 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
378 {
379         /* Only the last CS in this staged submission will get a completion.
380          * We must increment the reference for all other CS's in this
381          * staged submission.
382          * Once we get a completion we will release the whole staged submission.
383          */
384         if (!cs->staged_last)
385                 cs_get(cs);
386 }
387
388 /*
389  * staged_cs_put - put a CS in case it is part of staged submission
390  *
391  * @hdev: pointer to device structure
392  * @cs: CS to put
393  *
394  * This function decrements a CS reference (for a non completion CS)
395  */
396 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
397 {
398         /* We release all CSs in a staged submission except the last
399          * CS, whose reference we never incremented.
400          */
401         if (!cs_needs_completion(cs))
402                 cs_put(cs);
403 }
404
405 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
406 {
407         bool next_entry_found = false;
408         struct hl_cs *next;
409
410         if (!cs_needs_timeout(cs))
411                 return;
412
413         spin_lock(&hdev->cs_mirror_lock);
414
415         /* We need to handle the TDR only once for the complete staged submission.
416          * Hence, we choose the CS that reaches this function first, which is
417          * the CS marked as 'staged_last'.
418          */
419         if (cs->staged_cs && cs->staged_last)
420                 cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
421
422         spin_unlock(&hdev->cs_mirror_lock);
423
424         /* Don't cancel the TDR in case this CS timed out because we might be
425          * running from the TDR context
426          */
427         if (cs && (cs->timedout ||
428                         hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
429                 return;
430
431         if (cs && cs->tdr_active)
432                 cancel_delayed_work_sync(&cs->work_tdr);
433
434         spin_lock(&hdev->cs_mirror_lock);
435
436         /* queue TDR for next CS */
437         list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
438                 if (cs_needs_timeout(next)) {
439                         next_entry_found = true;
440                         break;
441                 }
442
443         if (next_entry_found && !next->tdr_active) {
444                 next->tdr_active = true;
445                 schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
446         }
447
448         spin_unlock(&hdev->cs_mirror_lock);
449 }
450
451 /*
452  * force_complete_multi_cs - complete all contexts that wait on multi-CS
453  *
454  * @hdev: pointer to habanalabs device structure
455  */
456 static void force_complete_multi_cs(struct hl_device *hdev)
457 {
458         int i;
459
460         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
461                 struct multi_cs_completion *mcs_compl;
462
463                 mcs_compl = &hdev->multi_cs_completion[i];
464
465                 spin_lock(&mcs_compl->lock);
466
467                 if (!mcs_compl->used) {
468                         spin_unlock(&mcs_compl->lock);
469                         continue;
470                 }
471
472         /* When calling force complete, no context should be waiting on
473          * multi-CS.
474          * We call this function as a protection for such a case, to free
475          * any pending context and print an error message
476                  */
477                 dev_err(hdev->dev,
478                                 "multi-CS completion context %d still waiting when calling force completion\n",
479                                 i);
480                 complete_all(&mcs_compl->completion);
481                 spin_unlock(&mcs_compl->lock);
482         }
483 }
484
485 /*
486  * complete_multi_cs - complete all waiting entities on multi-CS
487  *
488  * @hdev: pointer to habanalabs device structure
489  * @cs: CS structure
490  * The function signals a waiting entity that has overlapping stream masters
491  * with the completed CS.
492  * For example:
493  * - a completed CS worked on stream master QID 4, multi CS completion
494  *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
495  *   common stream master QID
496  * - a completed CS worked on stream master QID 4, multi CS completion
497  *   is actively waiting on stream master QIDs 3, 4. send signal as stream
498  *   master QID 4 is common
499  */
500 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
501 {
502         struct hl_fence *fence = cs->fence;
503         int i;
504
505         /* for a staged submission, check multi-CS completion only for the first CS */
506         if (cs->staged_cs && !cs->staged_first)
507                 return;
508
509         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
510                 struct multi_cs_completion *mcs_compl;
511
512                 mcs_compl = &hdev->multi_cs_completion[i];
513                 if (!mcs_compl->used)
514                         continue;
515
516                 spin_lock(&mcs_compl->lock);
517
518                 /*
519                  * complete if:
520                  * 1. still waiting for completion
521                  * 2. the completed CS has at least one overlapping stream
522                  *    master with the stream masters in the completion
523                  */
524                 if (mcs_compl->used &&
525                                 (fence->stream_master_qid_map &
526                                         mcs_compl->stream_master_qid_map)) {
527                         /* extract the timestamp only of first completed CS */
528                         if (!mcs_compl->timestamp)
529                                 mcs_compl->timestamp =
530                                                 ktime_to_ns(fence->timestamp);
531                         complete_all(&mcs_compl->completion);
532                 }
533
534                 spin_unlock(&mcs_compl->lock);
535         }
536 }
537
538 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
539                                         struct hl_cs *cs,
540                                         struct hl_cs_compl *hl_cs_cmpl)
541 {
542         /* Skip this handler if the cs wasn't submitted, to avoid putting
543          * the hw_sob twice, since that case was already handled at this point;
544          * also skip if the hw_sob pointer wasn't set.
545          */
546         if (!hl_cs_cmpl->hw_sob || !cs->submitted)
547                 return;
548
549         spin_lock(&hl_cs_cmpl->lock);
550
551         /*
552          * we take a refcount on the hw_sob object upon reservation of signals
553          * or a signal/wait cs, and need to put it when the first staged cs
554          * (which contains the encaps signals) or the signal/wait cs is completed.
555          */
556         if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
557                         (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
558                         (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
559                         (!!hl_cs_cmpl->encaps_signals)) {
560                 dev_dbg(hdev->dev,
561                                 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
562                                 hl_cs_cmpl->cs_seq,
563                                 hl_cs_cmpl->type,
564                                 hl_cs_cmpl->hw_sob->sob_id,
565                                 hl_cs_cmpl->sob_val);
566
567                 hw_sob_put(hl_cs_cmpl->hw_sob);
568
569                 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
570                         hdev->asic_funcs->reset_sob_group(hdev,
571                                         hl_cs_cmpl->sob_group);
572         }
573
574         spin_unlock(&hl_cs_cmpl->lock);
575 }
576
577 static void cs_do_release(struct kref *ref)
578 {
579         struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
580         struct hl_device *hdev = cs->ctx->hdev;
581         struct hl_cs_job *job, *tmp;
582         struct hl_cs_compl *hl_cs_cmpl =
583                         container_of(cs->fence, struct hl_cs_compl, base_fence);
584
585         cs->completed = true;
586
587         /*
588          * If we reached here, it means that all external jobs have
589          * finished, because each one of them took a refcount on the CS. We still
590          * need to go over the internal jobs and complete them. Otherwise, we
591          * will have leaked memory and, what's worse, the CS object (and
592          * potentially the CTX object) could be released while a JOB
593          * still holds a pointer to them (but no reference).
594          */
595         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
596                 complete_job(hdev, job);
597
598         if (!cs->submitted) {
599                 /*
600                  * In case the wait for signal CS was submitted, the fence put
601                  * occurs in init_signal_wait_cs() or collective_wait_init_cs()
602                  * right before hanging on the PQ.
603                  */
604                 if (cs->type == CS_TYPE_WAIT ||
605                                 cs->type == CS_TYPE_COLLECTIVE_WAIT)
606                         hl_fence_put(cs->signal_fence);
607
608                 goto out;
609         }
610
611         /* Need to update CI for all queue jobs that do not get a completion */
612         hl_hw_queue_update_ci(cs);
613
614         /* remove CS from CS mirror list */
615         spin_lock(&hdev->cs_mirror_lock);
616         list_del_init(&cs->mirror_node);
617         spin_unlock(&hdev->cs_mirror_lock);
618
619         cs_handle_tdr(hdev, cs);
620
621         if (cs->staged_cs) {
622                 /* the completion CS decrements reference for the entire
623                  * staged submission
624                  */
625                 if (cs->staged_last) {
626                         struct hl_cs *staged_cs, *tmp;
627
628                         list_for_each_entry_safe(staged_cs, tmp,
629                                         &cs->staged_cs_node, staged_cs_node)
630                                 staged_cs_put(hdev, staged_cs);
631                 }
632
633                 /* A staged CS will be a member in the list only after it
634                  * was submitted. We used 'cs_mirror_lock' when inserting
635                  * it into the list, so we use it again when removing it
636                  */
637                 if (cs->submitted) {
638                         spin_lock(&hdev->cs_mirror_lock);
639                         list_del(&cs->staged_cs_node);
640                         spin_unlock(&hdev->cs_mirror_lock);
641                 }
642
643                 /* decrement refcount to handle the case where the first staged cs
644                  * with encaps signals is completed.
645                  */
646                 if (hl_cs_cmpl->encaps_signals)
647                         kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
648                                                 hl_encaps_handle_do_release);
649         }
650
651         if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
652                         && cs->encaps_signals)
653                 kref_put(&cs->encaps_sig_hdl->refcount,
654                                         hl_encaps_handle_do_release);
655
656 out:
657         /* Must be called before hl_ctx_put because inside we use ctx to get
658          * the device
659          */
660         hl_debugfs_remove_cs(cs);
661
662         hl_ctx_put(cs->ctx);
663
664         /* We need to mark an error for a CS that was not submitted, because in
665          * that case the hl fence release flow is different. Mainly, we don't
666          * need to handle hw_sob for signal/wait
667          */
668         if (cs->timedout)
669                 cs->fence->error = -ETIMEDOUT;
670         else if (cs->aborted)
671                 cs->fence->error = -EIO;
672         else if (!cs->submitted)
673                 cs->fence->error = -EBUSY;
674
675         if (unlikely(cs->skip_reset_on_timeout)) {
676                 dev_err(hdev->dev,
677                         "Command submission %llu completed after %llu (s)\n",
678                         cs->sequence,
679                         div_u64(jiffies - cs->submission_time_jiffies, HZ));
680         }
681
682         if (cs->timestamp)
683                 cs->fence->timestamp = ktime_get();
684         complete_all(&cs->fence->completion);
685         complete_multi_cs(hdev, cs);
686
687         cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
688
689         hl_fence_put(cs->fence);
690
691         kfree(cs->jobs_in_queue_cnt);
692         kfree(cs);
693 }
694
695 static void cs_timedout(struct work_struct *work)
696 {
697         struct hl_device *hdev;
698         int rc;
699         struct hl_cs *cs = container_of(work, struct hl_cs,
700                                                  work_tdr.work);
701         bool skip_reset_on_timeout = cs->skip_reset_on_timeout;
702
703         rc = cs_get_unless_zero(cs);
704         if (!rc)
705                 return;
706
707         if ((!cs->submitted) || (cs->completed)) {
708                 cs_put(cs);
709                 return;
710         }
711
712         /* Mark that the CS timed out so we won't try to cancel its TDR */
713         if (likely(!skip_reset_on_timeout))
714                 cs->timedout = true;
715
716         hdev = cs->ctx->hdev;
717
718         switch (cs->type) {
719         case CS_TYPE_SIGNAL:
720                 dev_err(hdev->dev,
721                         "Signal command submission %llu has not finished in time!\n",
722                         cs->sequence);
723                 break;
724
725         case CS_TYPE_WAIT:
726                 dev_err(hdev->dev,
727                         "Wait command submission %llu has not finished in time!\n",
728                         cs->sequence);
729                 break;
730
731         case CS_TYPE_COLLECTIVE_WAIT:
732                 dev_err(hdev->dev,
733                         "Collective Wait command submission %llu has not finished in time!\n",
734                         cs->sequence);
735                 break;
736
737         default:
738                 dev_err(hdev->dev,
739                         "Command submission %llu has not finished in time!\n",
740                         cs->sequence);
741                 break;
742         }
743
744         rc = hl_state_dump(hdev);
745         if (rc)
746                 dev_err(hdev->dev, "Error during system state dump %d\n", rc);
747
748         cs_put(cs);
749
750         if (likely(!skip_reset_on_timeout)) {
751                 if (hdev->reset_on_lockup)
752                         hl_device_reset(hdev, HL_RESET_TDR);
753                 else
754                         hdev->needs_reset = true;
755         }
756 }
757
758 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
759                         enum hl_cs_type cs_type, u64 user_sequence,
760                         struct hl_cs **cs_new, u32 flags, u32 timeout)
761 {
762         struct hl_cs_counters_atomic *cntr;
763         struct hl_fence *other = NULL;
764         struct hl_cs_compl *cs_cmpl;
765         struct hl_cs *cs;
766         int rc;
767
768         cntr = &hdev->aggregated_cs_counters;
769
770         cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
771         if (!cs)
772                 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
773
774         if (!cs) {
775                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
776                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
777                 return -ENOMEM;
778         }
779
780         /* increment refcnt for context */
781         hl_ctx_get(hdev, ctx);
782
783         cs->ctx = ctx;
784         cs->submitted = false;
785         cs->completed = false;
786         cs->type = cs_type;
787         cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
788         cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
789         cs->timeout_jiffies = timeout;
790         cs->skip_reset_on_timeout =
791                 hdev->skip_reset_on_timeout ||
792                 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
793         cs->submission_time_jiffies = jiffies;
794         INIT_LIST_HEAD(&cs->job_list);
795         INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
796         kref_init(&cs->refcount);
797         spin_lock_init(&cs->job_lock);
798
799         cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
800         if (!cs_cmpl)
801                 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
802
803         if (!cs_cmpl) {
804                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
805                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
806                 rc = -ENOMEM;
807                 goto free_cs;
808         }
809
810         cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
811                         sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
812         if (!cs->jobs_in_queue_cnt)
813                 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
814                                 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
815
816         if (!cs->jobs_in_queue_cnt) {
817                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
818                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
819                 rc = -ENOMEM;
820                 goto free_cs_cmpl;
821         }
822
823         cs_cmpl->hdev = hdev;
824         cs_cmpl->type = cs->type;
825         spin_lock_init(&cs_cmpl->lock);
826         cs->fence = &cs_cmpl->base_fence;
827
828         spin_lock(&ctx->cs_lock);
829
830         cs_cmpl->cs_seq = ctx->cs_sequence;
831         other = ctx->cs_pending[cs_cmpl->cs_seq &
832                                 (hdev->asic_prop.max_pending_cs - 1)];
833
834         if (other && !completion_done(&other->completion)) {
835                 /* If the following statement is true, it means we have reached
836                  * a point in which only part of the staged submission was
837                  * submitted and we don't have enough room in the 'cs_pending'
838                  * array for the rest of the submission.
839                  * This causes a deadlock because this CS will never be
840                  * completed as it depends on future CS's for completion.
841                  */
842                 if (other->cs_sequence == user_sequence)
843                         dev_crit_ratelimited(hdev->dev,
844                                 "Staged CS %llu deadlock due to lack of resources",
845                                 user_sequence);
846
847                 dev_dbg_ratelimited(hdev->dev,
848                         "Rejecting CS because of too many in-flights CS\n");
849                 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
850                 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
851                 rc = -EAGAIN;
852                 goto free_fence;
853         }
854
855         /* init hl_fence */
856         hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
857
858         cs->sequence = cs_cmpl->cs_seq;
859
860         ctx->cs_pending[cs_cmpl->cs_seq &
861                         (hdev->asic_prop.max_pending_cs - 1)] =
862                                                         &cs_cmpl->base_fence;
863         ctx->cs_sequence++;
864
865         hl_fence_get(&cs_cmpl->base_fence);
866
867         hl_fence_put(other);
868
869         spin_unlock(&ctx->cs_lock);
870
871         *cs_new = cs;
872
873         return 0;
874
875 free_fence:
876         spin_unlock(&ctx->cs_lock);
877         kfree(cs->jobs_in_queue_cnt);
878 free_cs_cmpl:
879         kfree(cs_cmpl);
880 free_cs:
881         kfree(cs);
882         hl_ctx_put(ctx);
883         return rc;
884 }
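/*
 * Editor's sketch of the pending-fence bookkeeping in allocate_cs() above:
 * 'cs_pending' is a ring indexed by (sequence & (max_pending_cs - 1)), which
 * presumes max_pending_cs is a power of two. A slot can only be reused once
 * the fence that is max_pending_cs submissions older has completed; otherwise
 * allocate_cs() returns -EAGAIN, or warns about a staged-submission deadlock
 * when the occupying fence belongs to the very staged sequence the new CS
 * depends on (other->cs_sequence == user_sequence).
 */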
885
886 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
887 {
888         struct hl_cs_job *job, *tmp;
889
890         staged_cs_put(hdev, cs);
891
892         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
893                 complete_job(hdev, job);
894 }
895
896 void hl_cs_rollback_all(struct hl_device *hdev)
897 {
898         int i;
899         struct hl_cs *cs, *tmp;
900
901         flush_workqueue(hdev->sob_reset_wq);
902
903         /* flush all completions before iterating over the CS mirror list in
904          * order to avoid a race with the release functions
905          */
906         for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
907                 flush_workqueue(hdev->cq_wq[i]);
908
909         /* Make sure we don't have leftovers in the CS mirror list */
910         list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
911                 cs_get(cs);
912                 cs->aborted = true;
913                 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
914                                 cs->ctx->asid, cs->sequence);
915                 cs_rollback(hdev, cs);
916                 cs_put(cs);
917         }
918
919         force_complete_multi_cs(hdev);
920 }
921
922 static void
923 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
924 {
925         struct hl_user_pending_interrupt *pend;
926         unsigned long flags;
927
928         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
929         list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
930                 pend->fence.error = -EIO;
931                 complete_all(&pend->fence.completion);
932         }
933         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
934 }
935
936 void hl_release_pending_user_interrupts(struct hl_device *hdev)
937 {
938         struct asic_fixed_properties *prop = &hdev->asic_prop;
939         struct hl_user_interrupt *interrupt;
940         int i;
941
942         if (!prop->user_interrupt_count)
943                 return;
944
945         /* We iterate through the user interrupt requests and wake up all
946          * user threads waiting for interrupt completion. We iterate the
947          * list under a lock; this is why all user threads, once awake,
948          * will wait on the same lock and will release the waiting object upon
949          * unlock.
950          */
951
952         for (i = 0 ; i < prop->user_interrupt_count ; i++) {
953                 interrupt = &hdev->user_interrupt[i];
954                 wake_pending_user_interrupt_threads(interrupt);
955         }
956
957         interrupt = &hdev->common_user_interrupt;
958         wake_pending_user_interrupt_threads(interrupt);
959 }
960
961 static void job_wq_completion(struct work_struct *work)
962 {
963         struct hl_cs_job *job = container_of(work, struct hl_cs_job,
964                                                 finish_work);
965         struct hl_cs *cs = job->cs;
966         struct hl_device *hdev = cs->ctx->hdev;
967
968         /* job is no longer needed */
969         complete_job(hdev, job);
970 }
971
972 static int validate_queue_index(struct hl_device *hdev,
973                                 struct hl_cs_chunk *chunk,
974                                 enum hl_queue_type *queue_type,
975                                 bool *is_kernel_allocated_cb)
976 {
977         struct asic_fixed_properties *asic = &hdev->asic_prop;
978         struct hw_queue_properties *hw_queue_prop;
979
980         /* This must be checked here to prevent out-of-bounds access to
981          * hw_queues_props array
982          */
983         if (chunk->queue_index >= asic->max_queues) {
984                 dev_err(hdev->dev, "Queue index %d is invalid\n",
985                         chunk->queue_index);
986                 return -EINVAL;
987         }
988
989         hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
990
991         if (hw_queue_prop->type == QUEUE_TYPE_NA) {
992                 dev_err(hdev->dev, "Queue index %d is invalid\n",
993                         chunk->queue_index);
994                 return -EINVAL;
995         }
996
997         if (hw_queue_prop->driver_only) {
998                 dev_err(hdev->dev,
999                         "Queue index %d is restricted for the kernel driver\n",
1000                         chunk->queue_index);
1001                 return -EINVAL;
1002         }
1003
1004         /* When the hw queue type isn't QUEUE_TYPE_HW, the
1005          * USER_ALLOC_CB flag shall be treated as "don't care".
1006          */
1007         if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1008                 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1009                         if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1010                                 dev_err(hdev->dev,
1011                                         "Queue index %d doesn't support user CB\n",
1012                                         chunk->queue_index);
1013                                 return -EINVAL;
1014                         }
1015
1016                         *is_kernel_allocated_cb = false;
1017                 } else {
1018                         if (!(hw_queue_prop->cb_alloc_flags &
1019                                         CB_ALLOC_KERNEL)) {
1020                                 dev_err(hdev->dev,
1021                                         "Queue index %d doesn't support kernel CB\n",
1022                                         chunk->queue_index);
1023                                 return -EINVAL;
1024                         }
1025
1026                         *is_kernel_allocated_cb = true;
1027                 }
1028         } else {
1029                 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1030                                                 & CB_ALLOC_KERNEL);
1031         }
1032
1033         *queue_type = hw_queue_prop->type;
1034         return 0;
1035 }
1036
1037 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1038                                         struct hl_cb_mgr *cb_mgr,
1039                                         struct hl_cs_chunk *chunk)
1040 {
1041         struct hl_cb *cb;
1042         u32 cb_handle;
1043
1044         cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
1045
1046         cb = hl_cb_get(hdev, cb_mgr, cb_handle);
1047         if (!cb) {
1048                 dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
1049                 return NULL;
1050         }
1051
1052         if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1053                 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1054                 goto release_cb;
1055         }
1056
1057         atomic_inc(&cb->cs_cnt);
1058
1059         return cb;
1060
1061 release_cb:
1062         hl_cb_put(cb);
1063         return NULL;
1064 }
1065
1066 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1067                 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1068 {
1069         struct hl_cs_job *job;
1070
1071         job = kzalloc(sizeof(*job), GFP_ATOMIC);
1072         if (!job)
1073                 job = kzalloc(sizeof(*job), GFP_KERNEL);
1074
1075         if (!job)
1076                 return NULL;
1077
1078         kref_init(&job->refcount);
1079         job->queue_type = queue_type;
1080         job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1081
1082         if (is_cb_patched(hdev, job))
1083                 INIT_LIST_HEAD(&job->userptr_list);
1084
1085         if (job->queue_type == QUEUE_TYPE_EXT)
1086                 INIT_WORK(&job->finish_work, job_wq_completion);
1087
1088         return job;
1089 }
1090
1091 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1092 {
1093         if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1094                 return CS_TYPE_SIGNAL;
1095         else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1096                 return CS_TYPE_WAIT;
1097         else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1098                 return CS_TYPE_COLLECTIVE_WAIT;
1099         else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1100                 return CS_RESERVE_SIGNALS;
1101         else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1102                 return CS_UNRESERVE_SIGNALS;
1103         else
1104                 return CS_TYPE_DEFAULT;
1105 }
1106
1107 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1108 {
1109         struct hl_device *hdev = hpriv->hdev;
1110         struct hl_ctx *ctx = hpriv->ctx;
1111         u32 cs_type_flags, num_chunks;
1112         enum hl_device_status status;
1113         enum hl_cs_type cs_type;
1114
1115         if (!hl_device_operational(hdev, &status)) {
1116                 dev_warn_ratelimited(hdev->dev,
1117                         "Device is %s. Can't submit new CS\n",
1118                         hdev->status[status]);
1119                 return -EBUSY;
1120         }
1121
1122         if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1123                         !hdev->supports_staged_submission) {
1124                 dev_err(hdev->dev, "staged submission not supported");
1125                 return -EPERM;
1126         }
1127
1128         cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1129
1130         if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1131                 dev_err(hdev->dev,
1132                         "CS type flags are mutually exclusive, context %d\n",
1133                         ctx->asid);
1134                 return -EINVAL;
1135         }
1136
1137         cs_type = hl_cs_get_cs_type(cs_type_flags);
1138         num_chunks = args->in.num_chunks_execute;
1139
1140         if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
1141                                         !hdev->supports_sync_stream)) {
1142                 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1143                 return -EINVAL;
1144         }
1145
1146         if (cs_type == CS_TYPE_DEFAULT) {
1147                 if (!num_chunks) {
1148                         dev_err(hdev->dev,
1149                                 "Got execute CS with 0 chunks, context %d\n",
1150                                 ctx->asid);
1151                         return -EINVAL;
1152                 }
1153         } else if (num_chunks != 1) {
1154                 dev_err(hdev->dev,
1155                         "Sync stream CS mandates one chunk only, context %d\n",
1156                         ctx->asid);
1157                 return -EINVAL;
1158         }
1159
1160         return 0;
1161 }
1162
1163 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1164                                         struct hl_cs_chunk **cs_chunk_array,
1165                                         void __user *chunks, u32 num_chunks,
1166                                         struct hl_ctx *ctx)
1167 {
1168         u32 size_to_copy;
1169
1170         if (num_chunks > HL_MAX_JOBS_PER_CS) {
1171                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1172                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1173                 dev_err(hdev->dev,
1174                         "Number of chunks can NOT be larger than %d\n",
1175                         HL_MAX_JOBS_PER_CS);
1176                 return -EINVAL;
1177         }
1178
1179         *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1180                                         GFP_ATOMIC);
1181         if (!*cs_chunk_array)
1182                 *cs_chunk_array = kmalloc_array(num_chunks,
1183                                         sizeof(**cs_chunk_array), GFP_KERNEL);
1184         if (!*cs_chunk_array) {
1185                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1186                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1187                 return -ENOMEM;
1188         }
1189
1190         size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1191         if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1192                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1193                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1194                 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1195                 kfree(*cs_chunk_array);
1196                 return -EFAULT;
1197         }
1198
1199         return 0;
1200 }
1201
1202 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1203                                 u64 sequence, u32 flags,
1204                                 u32 encaps_signal_handle)
1205 {
1206         if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1207                 return 0;
1208
1209         cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1210         cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1211
1212         if (cs->staged_first) {
1213                 /* Staged CS sequence is the first CS sequence */
1214                 INIT_LIST_HEAD(&cs->staged_cs_node);
1215                 cs->staged_sequence = cs->sequence;
1216
1217                 if (cs->encaps_signals)
1218                         cs->encaps_sig_hdl_id = encaps_signal_handle;
1219         } else {
1220                 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1221                  * under the cs_mirror_lock
1222                  */
1223                 cs->staged_sequence = sequence;
1224         }
1225
1226         /* Increment CS reference if needed */
1227         staged_cs_get(hdev, cs);
1228
1229         cs->staged_cs = true;
1230
1231         return 0;
1232 }
1233
1234 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1235 {
1236         int i;
1237
1238         for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1239                 if (qid == hdev->stream_master_qid_arr[i])
1240                         return BIT(i);
1241
1242         return 0;
1243 }
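/*
 * Editor's note: illustrative example only. With a hypothetical
 * stream_master_qid_arr of { 4, 8, 12 }, a job on queue 8 contributes BIT(1)
 * (0x2) to the CS's stream_master_qid_map, while a qid that is not a stream
 * master contributes nothing. complete_multi_cs() later ANDs this map with
 * each waiter's stream_master_qid_map to decide whether to signal it.
 */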
1244
1245 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1246                                 u32 num_chunks, u64 *cs_seq, u32 flags,
1247                                 u32 encaps_signals_handle, u32 timeout)
1248 {
1249         bool staged_mid, int_queues_only = true;
1250         struct hl_device *hdev = hpriv->hdev;
1251         struct hl_cs_chunk *cs_chunk_array;
1252         struct hl_cs_counters_atomic *cntr;
1253         struct hl_ctx *ctx = hpriv->ctx;
1254         struct hl_cs_job *job;
1255         struct hl_cs *cs;
1256         struct hl_cb *cb;
1257         u64 user_sequence;
1258         u8 stream_master_qid_map = 0;
1259         int rc, i;
1260
1261         cntr = &hdev->aggregated_cs_counters;
1262         user_sequence = *cs_seq;
1263         *cs_seq = ULLONG_MAX;
1264
1265         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1266                         hpriv->ctx);
1267         if (rc)
1268                 goto out;
1269
1270         if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1271                         !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1272                 staged_mid = true;
1273         else
1274                 staged_mid = false;
1275
1276         rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1277                         staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1278                         timeout);
1279         if (rc)
1280                 goto free_cs_chunk_array;
1281
1282         *cs_seq = cs->sequence;
1283
1284         hl_debugfs_add_cs(cs);
1285
1286         rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1287                                                 encaps_signals_handle);
1288         if (rc)
1289                 goto free_cs_object;
1290
1291         /* Validate ALL the CS chunks before submitting the CS */
1292         for (i = 0 ; i < num_chunks ; i++) {
1293                 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1294                 enum hl_queue_type queue_type;
1295                 bool is_kernel_allocated_cb;
1296
1297                 rc = validate_queue_index(hdev, chunk, &queue_type,
1298                                                 &is_kernel_allocated_cb);
1299                 if (rc) {
1300                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1301                         atomic64_inc(&cntr->validation_drop_cnt);
1302                         goto free_cs_object;
1303                 }
1304
1305                 if (is_kernel_allocated_cb) {
1306                         cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
1307                         if (!cb) {
1308                                 atomic64_inc(
1309                                         &ctx->cs_counters.validation_drop_cnt);
1310                                 atomic64_inc(&cntr->validation_drop_cnt);
1311                                 rc = -EINVAL;
1312                                 goto free_cs_object;
1313                         }
1314                 } else {
1315                         cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1316                 }
1317
1318                 if (queue_type == QUEUE_TYPE_EXT ||
1319                                                 queue_type == QUEUE_TYPE_HW) {
1320                         int_queues_only = false;
1321
1322                         /*
1323                          * store which stream are being used for external/HW
1324                          * queues of this CS
1325                          */
1326                         if (hdev->supports_wait_for_multi_cs)
1327                                 stream_master_qid_map |=
1328                                         get_stream_master_qid_mask(hdev,
1329                                                         chunk->queue_index);
1330                 }
1331
1332                 job = hl_cs_allocate_job(hdev, queue_type,
1333                                                 is_kernel_allocated_cb);
1334                 if (!job) {
1335                         atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1336                         atomic64_inc(&cntr->out_of_mem_drop_cnt);
1337                         dev_err(hdev->dev, "Failed to allocate a new job\n");
1338                         rc = -ENOMEM;
1339                         if (is_kernel_allocated_cb)
1340                                 goto release_cb;
1341
1342                         goto free_cs_object;
1343                 }
1344
1345                 job->id = i + 1;
1346                 job->cs = cs;
1347                 job->user_cb = cb;
1348                 job->user_cb_size = chunk->cb_size;
1349                 job->hw_queue_id = chunk->queue_index;
1350
1351                 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1352
1353                 list_add_tail(&job->cs_node, &cs->job_list);
1354
1355                 /*
1356                  * Increment CS reference. When the CS reference is 0, the CS is
1357                  * done and can be signaled to the user and all its resources freed.
1358                  * Only increment for JOBs on external or H/W queues, because
1359                  * only for those JOBs do we get a completion
1360                  */
1361                 if (cs_needs_completion(cs) &&
1362                         (job->queue_type == QUEUE_TYPE_EXT ||
1363                                 job->queue_type == QUEUE_TYPE_HW))
1364                         cs_get(cs);
1365
1366                 hl_debugfs_add_job(hdev, job);
1367
1368                 rc = cs_parser(hpriv, job);
1369                 if (rc) {
1370                         atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1371                         atomic64_inc(&cntr->parsing_drop_cnt);
1372                         dev_err(hdev->dev,
1373                                 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1374                                 cs->ctx->asid, cs->sequence, job->id, rc);
1375                         goto free_cs_object;
1376                 }
1377         }
1378
1379         /* We allow a CS with any queue type combination as long as it does
1380          * not get a completion
1381          */
1382         if (int_queues_only && cs_needs_completion(cs)) {
1383                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1384                 atomic64_inc(&cntr->validation_drop_cnt);
1385                 dev_err(hdev->dev,
1386                         "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1387                         cs->ctx->asid, cs->sequence);
1388                 rc = -EINVAL;
1389                 goto free_cs_object;
1390         }
1391
1392         /*
1393          * store the (external/HW queues) streams used by the CS in the
1394          * fence object for multi-CS completion
1395          */
1396         if (hdev->supports_wait_for_multi_cs)
1397                 cs->fence->stream_master_qid_map = stream_master_qid_map;
1398
1399         rc = hl_hw_queue_schedule_cs(cs);
1400         if (rc) {
1401                 if (rc != -EAGAIN)
1402                         dev_err(hdev->dev,
1403                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1404                                 cs->ctx->asid, cs->sequence, rc);
1405                 goto free_cs_object;
1406         }
1407
1408         rc = HL_CS_STATUS_SUCCESS;
1409         goto put_cs;
1410
1411 release_cb:
1412         atomic_dec(&cb->cs_cnt);
1413         hl_cb_put(cb);
1414 free_cs_object:
1415         cs_rollback(hdev, cs);
1416         *cs_seq = ULLONG_MAX;
1417         /* The path below is both for good and erroneous exits */
1418 put_cs:
1419         /* We finished with the CS in this function, so put the ref */
1420         cs_put(cs);
1421 free_cs_chunk_array:
1422         kfree(cs_chunk_array);
1423 out:
1424         return rc;
1425 }
1426
1427 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1428                                 u64 *cs_seq)
1429 {
1430         struct hl_device *hdev = hpriv->hdev;
1431         struct hl_ctx *ctx = hpriv->ctx;
1432         bool need_soft_reset = false;
1433         int rc = 0, do_ctx_switch;
1434         void __user *chunks;
1435         u32 num_chunks, tmp;
1436         int ret;
1437
1438         do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1439
1440         if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1441                 mutex_lock(&hpriv->restore_phase_mutex);
1442
1443                 if (do_ctx_switch) {
1444                         rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1445                         if (rc) {
1446                                 dev_err_ratelimited(hdev->dev,
1447                                         "Failed to switch to context %d, rejecting CS! %d\n",
1448                                         ctx->asid, rc);
1449                                 /*
1450                                  * If we timed out, or if the device is not IDLE
1451                                  * while we want to do a context switch (-EBUSY),
1452                                  * we need to soft-reset because the QMAN is
1453                                  * probably stuck. However, we can't call reset
1454                                  * here directly because of a deadlock, so we
1455                                  * need to do it at the very end of this
1456                                  * function
1457                                  */
1458                                 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1459                                         need_soft_reset = true;
1460                                 mutex_unlock(&hpriv->restore_phase_mutex);
1461                                 goto out;
1462                         }
1463                 }
1464
1465                 hdev->asic_funcs->restore_phase_topology(hdev);
1466
1467                 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1468                 num_chunks = args->in.num_chunks_restore;
1469
1470                 if (!num_chunks) {
1471                         dev_dbg(hdev->dev,
1472                                 "Need to run restore phase but restore CS is empty\n");
1473                         rc = 0;
1474                 } else {
1475                         rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1476                                         cs_seq, 0, 0, hdev->timeout_jiffies);
1477                 }
1478
1479                 mutex_unlock(&hpriv->restore_phase_mutex);
1480
1481                 if (rc) {
1482                         dev_err(hdev->dev,
1483                                 "Failed to submit restore CS for context %d (%d)\n",
1484                                 ctx->asid, rc);
1485                         goto out;
1486                 }
1487
1488                 /* Need to wait for restore completion before execution phase */
1489                 if (num_chunks) {
1490                         enum hl_cs_wait_status status;
1491 wait_again:
1492                         ret = _hl_cs_wait_ioctl(hdev, ctx,
1493                                         jiffies_to_usecs(hdev->timeout_jiffies),
1494                                         *cs_seq, &status, NULL);
1495                         if (ret) {
1496                                 if (ret == -ERESTARTSYS) {
1497                                         usleep_range(100, 200);
1498                                         goto wait_again;
1499                                 }
1500
1501                                 dev_err(hdev->dev,
1502                                         "Restore CS for context %d failed to complete %d\n",
1503                                         ctx->asid, ret);
1504                                 rc = -ENOEXEC;
1505                                 goto out;
1506                         }
1507                 }
1508
1509                 ctx->thread_ctx_switch_wait_token = 1;
1510
1511         } else if (!ctx->thread_ctx_switch_wait_token) {
1512                 rc = hl_poll_timeout_memory(hdev,
1513                         &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1514                         100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1515
1516                 if (rc == -ETIMEDOUT) {
1517                         dev_err(hdev->dev,
1518                                 "context switch phase timeout (%d)\n", tmp);
1519                         goto out;
1520                 }
1521         }
1522
1523 out:
1524         if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1525                 hl_device_reset(hdev, 0);
1526
1527         return rc;
1528 }
1529
1530 /*
1531  * hl_cs_signal_sob_wraparound_handler: handle the SOB value wraparound case.
1532  * If the SOB value reaches the max value, move to the other SOB reserved
1533  * for the queue.
1534  * @hdev: pointer to device structure
1535  * @q_idx: stream queue index
1536  * @hw_sob: the H/W SOB used in this signal CS.
1537  * @count: signals count
1538  * @encaps_sig: tells whether it's reservation for encaps signals or not.
1539  *
1540  * Note that this function must be called while hw_queues_lock is taken.
1541  */
1542 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1543                         struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1544
1545 {
1546         struct hl_sync_stream_properties *prop;
1547         struct hl_hw_sob *sob = *hw_sob, *other_sob;
1548         u8 other_sob_offset;
1549
1550         prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1551
1552         hw_sob_get(sob);
1553
1554         /* check for wraparound */
1555         if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1556                 /*
1557                  * Decrement as we reached the max value.
1558                  * The release function won't be called here as we've
1559                  * just incremented the refcount right before calling this
1560                  * function.
1561                  */
1562                 hw_sob_put_err(sob);
1563
1564                 /*
1565                  * Check the other sob value; if it is still in use then fail,
1566                  * otherwise make the switch.
1567                  */
1568                 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1569                 other_sob = &prop->hw_sob[other_sob_offset];
1570
1571                 if (kref_read(&other_sob->kref) != 1) {
1572                         dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1573                                                                 q_idx);
1574                         return -EINVAL;
1575                 }
1576
1577                 /*
1578                  * next_sob_val always points to the next available signal
1579                  * in the sob, so in encaps signals it will be the next one
1580                  * after reserving the required amount.
1581                  */
1582                 if (encaps_sig)
1583                         prop->next_sob_val = count + 1;
1584                 else
1585                         prop->next_sob_val = count;
1586
1587                 /* only two SOBs are currently in use */
1588                 prop->curr_sob_offset = other_sob_offset;
1589                 *hw_sob = other_sob;
1590
1591                 /*
1592                  * Check if other_sob needs a reset, and if so do it before
1593                  * using it for the reservation or the next signal cs.
1594                  * We do it here, for both the encaps and regular signal cs
1595                  * cases, in order to avoid possible races of two kref_put
1596                  * calls on the sob, which can occur at the same time if we
1597                  * move the sob reset (kref_put) to the cs_do_release function.
1598                  * In addition, if we have a combination of signal cs and
1599                  * encaps, and at the point we need to reset the sob there are
1600                  * no more reservations and only signal cs keep coming, in
1601                  * such a case we need the signal cs to put the refcount and
1602                  * reset the sob.
1603                  */
1604                 if (other_sob->need_reset)
1605                         hw_sob_put(other_sob);
1606
1607                 if (encaps_sig) {
1608                         /* set reset indication for the sob */
1609                         sob->need_reset = true;
1610                         hw_sob_get(other_sob);
1611                 }
1612
1613                 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1614                                 prop->curr_sob_offset, q_idx);
1615         } else {
1616                 prop->next_sob_val += count;
1617         }
1618
1619         return 0;
1620 }
1621
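/*
 * cs_ioctl_extract_signal_seq - get the signal CS sequence to wait on
 *
 * For encapsulated signals the sequence is taken directly from the chunk.
 * Otherwise, a signal sequence array of length 1 (the only length currently
 * supported) is copied from the user and its single entry is returned.
 */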
1622 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1623                 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1624                 bool encaps_signals)
1625 {
1626         u64 *signal_seq_arr = NULL;
1627         u32 size_to_copy, signal_seq_arr_len;
1628         int rc = 0;
1629
1630         if (encaps_signals) {
1631                 *signal_seq = chunk->encaps_signal_seq;
1632                 return 0;
1633         }
1634
1635         signal_seq_arr_len = chunk->num_signal_seq_arr;
1636
1637         /* currently only one signal seq is supported */
1638         if (signal_seq_arr_len != 1) {
1639                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1640                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1641                 dev_err(hdev->dev,
1642                         "Wait for signal CS supports only one signal CS seq\n");
1643                 return -EINVAL;
1644         }
1645
1646         signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1647                                         sizeof(*signal_seq_arr),
1648                                         GFP_ATOMIC);
1649         if (!signal_seq_arr)
1650                 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1651                                         sizeof(*signal_seq_arr),
1652                                         GFP_KERNEL);
1653         if (!signal_seq_arr) {
1654                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1655                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1656                 return -ENOMEM;
1657         }
1658
1659         size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1660         if (copy_from_user(signal_seq_arr,
1661                                 u64_to_user_ptr(chunk->signal_seq_arr),
1662                                 size_to_copy)) {
1663                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1664                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1665                 dev_err(hdev->dev,
1666                         "Failed to copy signal seq array from user\n");
1667                 rc = -EFAULT;
1668                 goto out;
1669         }
1670
1671         /* currently it is guaranteed to have only one signal seq */
1672         *signal_seq = signal_seq_arr[0];
1673
1674 out:
1675         kfree(signal_seq_arr);
1676
1677         return rc;
1678 }
1679
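/*
 * cs_ioctl_signal_wait_create_jobs - create the single job of a signal/wait CS
 *
 * Allocates a kernel job and a kernel CB sized according to the CS type,
 * attaches the job to the CS on queue q_idx and takes a CS reference since
 * such a job gets a completion. The kernel CB is used as-is (no parsing),
 * so it also serves as the patched CB.
 */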
1680 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1681                 struct hl_ctx *ctx, struct hl_cs *cs,
1682                 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1683 {
1684         struct hl_cs_counters_atomic *cntr;
1685         struct hl_cs_job *job;
1686         struct hl_cb *cb;
1687         u32 cb_size;
1688
1689         cntr = &hdev->aggregated_cs_counters;
1690
1691         job = hl_cs_allocate_job(hdev, q_type, true);
1692         if (!job) {
1693                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1694                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1695                 dev_err(hdev->dev, "Failed to allocate a new job\n");
1696                 return -ENOMEM;
1697         }
1698
1699         if (cs->type == CS_TYPE_WAIT)
1700                 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1701         else
1702                 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1703
1704         cb = hl_cb_kernel_create(hdev, cb_size,
1705                                 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1706         if (!cb) {
1707                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1708                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1709                 kfree(job);
1710                 return -EFAULT;
1711         }
1712
1713         job->id = 0;
1714         job->cs = cs;
1715         job->user_cb = cb;
1716         atomic_inc(&job->user_cb->cs_cnt);
1717         job->user_cb_size = cb_size;
1718         job->hw_queue_id = q_idx;
1719
1720         if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1721                         && cs->encaps_signals)
1722                 job->encaps_sig_wait_offset = encaps_signal_offset;
1723         /*
1724          * No need for parsing, the user CB is the patched CB.
1725          * We call hl_cb_destroy() for two reasons - we don't need the CB in
1726          * the CB idr anymore, and we need to decrement its refcount as it was
1727          * incremented inside hl_cb_kernel_create().
1728          */
1729         job->patched_cb = job->user_cb;
1730         job->job_cb_size = job->user_cb_size;
1731         hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
1732
1733         /* increment refcount as for external queues we get completion */
1734         cs_get(cs);
1735
1736         cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1737
1738         list_add_tail(&job->cs_node, &cs->job_list);
1739
1740         hl_debugfs_add_job(hdev, job);
1741
1742         return 0;
1743 }
1744
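/*
 * cs_ioctl_reserve_signals - handle the CS_RESERVE_SIGNALS request
 *
 * Allocates an encapsulated-signals handle, registers it in the context's
 * signals manager IDR and advances the queue's SOB value by 'count'
 * (switching SOBs on wraparound) to reserve that many signals. Returns the
 * handle id, the SOB address and the new signals count to the user.
 */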
1745 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1746                                 u32 q_idx, u32 count,
1747                                 u32 *handle_id, u32 *sob_addr,
1748                                 u32 *signals_count)
1749 {
1750         struct hw_queue_properties *hw_queue_prop;
1751         struct hl_sync_stream_properties *prop;
1752         struct hl_device *hdev = hpriv->hdev;
1753         struct hl_cs_encaps_sig_handle *handle;
1754         struct hl_encaps_signals_mgr *mgr;
1755         struct hl_hw_sob *hw_sob;
1756         int hdl_id;
1757         int rc = 0;
1758
1759         if (count >= HL_MAX_SOB_VAL) {
1760                 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
1761                                                 count);
1762                 rc = -EINVAL;
1763                 goto out;
1764         }
1765
1766         if (q_idx >= hdev->asic_prop.max_queues) {
1767                 dev_err(hdev->dev, "Queue index %d is invalid\n",
1768                         q_idx);
1769                 rc = -EINVAL;
1770                 goto out;
1771         }
1772
1773         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1774
1775         if (!hw_queue_prop->supports_sync_stream) {
1776                 dev_err(hdev->dev,
1777                         "Queue index %d does not support sync stream operations\n",
1778                                                                         q_idx);
1779                 rc = -EINVAL;
1780                 goto out;
1781         }
1782
1783         prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1784
1785         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1786         if (!handle) {
1787                 rc = -ENOMEM;
1788                 goto out;
1789         }
1790
1791         handle->count = count;
1792         mgr = &hpriv->ctx->sig_mgr;
1793
1794         spin_lock(&mgr->lock);
1795         hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
1796         spin_unlock(&mgr->lock);
1797
1798         if (hdl_id < 0) {
1799                 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
1800                 rc = -EINVAL;
1801                 goto out;
1802         }
1803
1804         handle->id = hdl_id;
1805         handle->q_idx = q_idx;
1806         handle->hdev = hdev;
1807         kref_init(&handle->refcount);
1808
1809         hdev->asic_funcs->hw_queues_lock(hdev);
1810
1811         hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1812
1813         /*
1814          * Increment the SOB value by count per the user request
1815          * to reserve those signals.
1816          * Check that the amount of signals to reserve does not exceed the max
1817          * SOB value; if it does, switch SOBs.
1818          */
1819         rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
1820                                                                 true);
1821         if (rc) {
1822                 dev_err(hdev->dev, "Failed to switch SOB\n");
1823                 hdev->asic_funcs->hw_queues_unlock(hdev);
1824                 rc = -EINVAL;
1825                 goto remove_idr;
1826         }
1827         /* Set the hw_sob in the handle after calling the sob wraparound
1828          * handler, since the sob could have changed.
1829          */
1830         handle->hw_sob = hw_sob;
1831
1832         /* store the current sob value for unreserve validity check, and
1833          * signal offset support
1834          */
1835         handle->pre_sob_val = prop->next_sob_val - handle->count;
1836
1837         *signals_count = prop->next_sob_val;
1838         hdev->asic_funcs->hw_queues_unlock(hdev);
1839
1840         *sob_addr = handle->hw_sob->sob_addr;
1841         *handle_id = hdl_id;
1842
1843         dev_dbg(hdev->dev,
1844                 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
1845                         hw_sob->sob_id, handle->hw_sob->sob_addr,
1846                         prop->next_sob_val - 1, q_idx, hdl_id);
1847         goto out;
1848
1849 remove_idr:
1850         spin_lock(&mgr->lock);
1851         idr_remove(&mgr->handles, hdl_id);
1852         spin_unlock(&mgr->lock);
1853
1854         kfree(handle);
1855 out:
1856         return rc;
1857 }
1858
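/*
 * cs_ioctl_unreserve_signals - handle the CS_UNRESERVE_SIGNALS request
 *
 * Looks up the reservation handle, verifies the SOB value did not move due
 * to other submissions or a SOB switch, rolls back the reservation by
 * decrementing next_sob_val, drops the SOB refcount and frees the handle.
 */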
1859 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
1860 {
1861         struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
1862         struct hl_sync_stream_properties *prop;
1863         struct hl_device *hdev = hpriv->hdev;
1864         struct hl_encaps_signals_mgr *mgr;
1865         struct hl_hw_sob *hw_sob;
1866         u32 q_idx, sob_addr;
1867         int rc = 0;
1868
1869         mgr = &hpriv->ctx->sig_mgr;
1870
1871         spin_lock(&mgr->lock);
1872         encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
1873         if (encaps_sig_hdl) {
1874                 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
1875                                 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
1876                                         encaps_sig_hdl->count);
1877
1878                 hdev->asic_funcs->hw_queues_lock(hdev);
1879
1880                 q_idx = encaps_sig_hdl->q_idx;
1881                 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1882                 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1883                 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
1884
1885                 /* Check if sob_val got out of sync due to other
1886                  * signal submission requests which were handled
1887                  * between the reserve-unreserve calls or SOB switch
1888                  * upon reaching SOB max value.
1889                  */
1890                 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
1891                                 != prop->next_sob_val ||
1892                                 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
1893                         dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
1894                                 encaps_sig_hdl->pre_sob_val,
1895                                 (prop->next_sob_val - encaps_sig_hdl->count));
1896
1897                         hdev->asic_funcs->hw_queues_unlock(hdev);
1898                         rc = -EINVAL;
1899                         goto out;
1900                 }
1901
1902                 /*
1903                  * Decrement the SOB value by count per the user request
1904                  * to unreserve those signals.
1905                  */
1906                 prop->next_sob_val -= encaps_sig_hdl->count;
1907
1908                 hdev->asic_funcs->hw_queues_unlock(hdev);
1909
1910                 hw_sob_put(hw_sob);
1911
1912                 /* Release the id and free allocated memory of the handle */
1913                 idr_remove(&mgr->handles, handle_id);
1914                 kfree(encaps_sig_hdl);
1915         } else {
1916                 rc = -EINVAL;
1917                 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
1918         }
1919 out:
1920         spin_unlock(&mgr->lock);
1921
1922         return rc;
1923 }
1924
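/*
 * cs_ioctl_signal_wait - handle a signal/wait/collective-wait CS
 *
 * For a wait CS, extracts the signal sequence (or the encapsulated signals
 * handle), fetches the corresponding fence and validates it belongs to a
 * signal or encaps-signal CS. It then allocates the CS, creates the
 * signal/wait job (or the collective-wait jobs) and schedules the CS to
 * the H/W queues.
 */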
1925 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
1926                                 void __user *chunks, u32 num_chunks,
1927                                 u64 *cs_seq, u32 flags, u32 timeout)
1928 {
1929         struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
1930         bool handle_found = false, is_wait_cs = false,
1931                         wait_cs_submitted = false,
1932                         cs_encaps_signals = false;
1933         struct hl_cs_chunk *cs_chunk_array, *chunk;
1934         bool staged_cs_with_encaps_signals = false;
1935         struct hw_queue_properties *hw_queue_prop;
1936         struct hl_device *hdev = hpriv->hdev;
1937         struct hl_cs_compl *sig_waitcs_cmpl;
1938         u32 q_idx, collective_engine_id = 0;
1939         struct hl_cs_counters_atomic *cntr;
1940         struct hl_fence *sig_fence = NULL;
1941         struct hl_ctx *ctx = hpriv->ctx;
1942         enum hl_queue_type q_type;
1943         struct hl_cs *cs;
1944         u64 signal_seq;
1945         int rc;
1946
1947         cntr = &hdev->aggregated_cs_counters;
1948         *cs_seq = ULLONG_MAX;
1949
1950         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1951                         ctx);
1952         if (rc)
1953                 goto out;
1954
1955         /* currently it is guaranteed to have only one chunk */
1956         chunk = &cs_chunk_array[0];
1957
1958         if (chunk->queue_index >= hdev->asic_prop.max_queues) {
1959                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1960                 atomic64_inc(&cntr->validation_drop_cnt);
1961                 dev_err(hdev->dev, "Queue index %d is invalid\n",
1962                         chunk->queue_index);
1963                 rc = -EINVAL;
1964                 goto free_cs_chunk_array;
1965         }
1966
1967         q_idx = chunk->queue_index;
1968         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1969         q_type = hw_queue_prop->type;
1970
1971         if (!hw_queue_prop->supports_sync_stream) {
1972                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1973                 atomic64_inc(&cntr->validation_drop_cnt);
1974                 dev_err(hdev->dev,
1975                         "Queue index %d does not support sync stream operations\n",
1976                         q_idx);
1977                 rc = -EINVAL;
1978                 goto free_cs_chunk_array;
1979         }
1980
1981         if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
1982                 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
1983                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1984                         atomic64_inc(&cntr->validation_drop_cnt);
1985                         dev_err(hdev->dev,
1986                                 "Queue index %d is invalid\n", q_idx);
1987                         rc = -EINVAL;
1988                         goto free_cs_chunk_array;
1989                 }
1990
1991                 collective_engine_id = chunk->collective_engine_id;
1992         }
1993
1994         is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
1995                         cs_type == CS_TYPE_COLLECTIVE_WAIT);
1996
1997         cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
1998
1999         if (is_wait_cs) {
2000                 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2001                                 ctx, cs_encaps_signals);
2002                 if (rc)
2003                         goto free_cs_chunk_array;
2004
2005                 if (cs_encaps_signals) {
2006                         /* check if cs sequence has encapsulated
2007                          * signals handle
2008                          */
2009                         struct idr *idp;
2010                         u32 id;
2011
2012                         spin_lock(&ctx->sig_mgr.lock);
2013                         idp = &ctx->sig_mgr.handles;
2014                         idr_for_each_entry(idp, encaps_sig_hdl, id) {
2015                                 if (encaps_sig_hdl->cs_seq == signal_seq) {
2016                                         handle_found = true;
2017                                         /* get refcount to protect removing
2018                                          * this handle from idr, needed when
2019                                          * multiple wait cs are used with offset
2020                                          * to wait on reserved encaps signals.
2021                                          */
2022                                         kref_get(&encaps_sig_hdl->refcount);
2023                                         break;
2024                                 }
2025                         }
2026                         spin_unlock(&ctx->sig_mgr.lock);
2027
2028                         if (!handle_found) {
2029                                 dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2030                                                 signal_seq);
2031                                 rc = -EINVAL;
2032                                 goto free_cs_chunk_array;
2033                         }
2034
2035                         /* validate also the signal offset value */
2036                         if (chunk->encaps_signal_offset >
2037                                         encaps_sig_hdl->count) {
2038                                 dev_err(hdev->dev, "offset(%u) value exceeds max reserved signals count(%u)!\n",
2039                                                 chunk->encaps_signal_offset,
2040                                                 encaps_sig_hdl->count);
2041                                 rc = -EINVAL;
2042                                 goto free_cs_chunk_array;
2043                         }
2044                 }
2045
2046                 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2047                 if (IS_ERR(sig_fence)) {
2048                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2049                         atomic64_inc(&cntr->validation_drop_cnt);
2050                         dev_err(hdev->dev,
2051                                 "Failed to get signal CS with seq 0x%llx\n",
2052                                 signal_seq);
2053                         rc = PTR_ERR(sig_fence);
2054                         goto free_cs_chunk_array;
2055                 }
2056
2057                 if (!sig_fence) {
2058                         /* signal CS already finished */
2059                         rc = 0;
2060                         goto free_cs_chunk_array;
2061                 }
2062
2063                 sig_waitcs_cmpl =
2064                         container_of(sig_fence, struct hl_cs_compl, base_fence);
2065
2066                 staged_cs_with_encaps_signals = !!
2067                                 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2068                                 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2069
2070                 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2071                                 !staged_cs_with_encaps_signals) {
2072                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2073                         atomic64_inc(&cntr->validation_drop_cnt);
2074                         dev_err(hdev->dev,
2075                                 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2076                                 signal_seq);
2077                         hl_fence_put(sig_fence);
2078                         rc = -EINVAL;
2079                         goto free_cs_chunk_array;
2080                 }
2081
2082                 if (completion_done(&sig_fence->completion)) {
2083                         /* signal CS already finished */
2084                         hl_fence_put(sig_fence);
2085                         rc = 0;
2086                         goto free_cs_chunk_array;
2087                 }
2088         }
2089
2090         rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2091         if (rc) {
2092                 if (is_wait_cs)
2093                         hl_fence_put(sig_fence);
2094
2095                 goto free_cs_chunk_array;
2096         }
2097
2098         /*
2099          * Save the signal CS fence for later initialization right before
2100          * hanging the wait CS on the queue.
2101          * For the encaps signals case, we save the cs sequence and the handle
2102          * pointer for later initialization.
2103          */
2104         if (is_wait_cs) {
2105                 cs->signal_fence = sig_fence;
2106                 /* Store the handle pointer, so we don't have to
2107                  * look for it again later in the flow,
2108                  * when we need to set the SOB info in hw_queue.
2109                  */
2110                 if (cs->encaps_signals)
2111                         cs->encaps_sig_hdl = encaps_sig_hdl;
2112         }
2113
2114         hl_debugfs_add_cs(cs);
2115
2116         *cs_seq = cs->sequence;
2117
2118         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2119                 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2120                                 q_idx, chunk->encaps_signal_offset);
2121         else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2122                 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2123                                 cs, q_idx, collective_engine_id,
2124                                 chunk->encaps_signal_offset);
2125         else {
2126                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2127                 atomic64_inc(&cntr->validation_drop_cnt);
2128                 rc = -EINVAL;
2129         }
2130
2131         if (rc)
2132                 goto free_cs_object;
2133
2134         rc = hl_hw_queue_schedule_cs(cs);
2135         if (rc) {
2136                 /* In case the wait cs failed here, it means the signal cs
2137                  * already completed. We want to free all its related objects,
2138                  * but we don't want to fail the ioctl.
2139                  */
2140                 if (is_wait_cs)
2141                         rc = 0;
2142                 else if (rc != -EAGAIN)
2143                         dev_err(hdev->dev,
2144                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2145                                 ctx->asid, cs->sequence, rc);
2146                 goto free_cs_object;
2147         }
2148
2149         rc = HL_CS_STATUS_SUCCESS;
2150         if (is_wait_cs)
2151                 wait_cs_submitted = true;
2152         goto put_cs;
2153
2154 free_cs_object:
2155         cs_rollback(hdev, cs);
2156         *cs_seq = ULLONG_MAX;
2157         /* The path below is both for good and erroneous exits */
2158 put_cs:
2159         /* We finished with the CS in this function, so put the ref */
2160         cs_put(cs);
2161 free_cs_chunk_array:
2162         if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
2163                                                         is_wait_cs)
2164                 kref_put(&encaps_sig_hdl->refcount,
2165                                 hl_encaps_handle_do_release);
2166         kfree(cs_chunk_array);
2167 out:
2168         return rc;
2169 }
2170
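/*
 * hl_cs_ioctl - the command submission ioctl entry point
 *
 * Runs the sanity checks and the context-switch/restore phase, then
 * dispatches according to the CS type derived from the flags:
 * signal/wait/collective-wait, reserve/unreserve encapsulated signals or
 * the default CS submission, and finally fills the output arguments.
 */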
2171 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2172 {
2173         union hl_cs_args *args = data;
2174         enum hl_cs_type cs_type = 0;
2175         u64 cs_seq = ULLONG_MAX;
2176         void __user *chunks;
2177         u32 num_chunks, flags, timeout,
2178                 signals_count = 0, sob_addr = 0, handle_id = 0;
2179         int rc;
2180
2181         rc = hl_cs_sanity_checks(hpriv, args);
2182         if (rc)
2183                 goto out;
2184
2185         rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2186         if (rc)
2187                 goto out;
2188
2189         cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2190                                         ~HL_CS_FLAGS_FORCE_RESTORE);
2191         chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2192         num_chunks = args->in.num_chunks_execute;
2193         flags = args->in.cs_flags;
2194
2195         /* In case this is a staged CS, user should supply the CS sequence */
2196         if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2197                         !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2198                 cs_seq = args->in.seq;
2199
2200         timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2201                         ? msecs_to_jiffies(args->in.timeout * 1000)
2202                         : hpriv->hdev->timeout_jiffies;
2203
2204         switch (cs_type) {
2205         case CS_TYPE_SIGNAL:
2206         case CS_TYPE_WAIT:
2207         case CS_TYPE_COLLECTIVE_WAIT:
2208                 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2209                                         &cs_seq, args->in.cs_flags, timeout);
2210                 break;
2211         case CS_RESERVE_SIGNALS:
2212                 rc = cs_ioctl_reserve_signals(hpriv,
2213                                         args->in.encaps_signals_q_idx,
2214                                         args->in.encaps_signals_count,
2215                                         &handle_id, &sob_addr, &signals_count);
2216                 break;
2217         case CS_UNRESERVE_SIGNALS:
2218                 rc = cs_ioctl_unreserve_signals(hpriv,
2219                                         args->in.encaps_sig_handle_id);
2220                 break;
2221         default:
2222                 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2223                                                 args->in.cs_flags,
2224                                                 args->in.encaps_sig_handle_id,
2225                                                 timeout);
2226                 break;
2227         }
2228 out:
2229         if (rc != -EAGAIN) {
2230                 memset(args, 0, sizeof(*args));
2231
2232                 if (cs_type == CS_RESERVE_SIGNALS) {
2233                         args->out.handle_id = handle_id;
2234                         args->out.sob_base_addr_offset = sob_addr;
2235                         args->out.count = signals_count;
2236                 } else {
2237                         args->out.seq = cs_seq;
2238                 }
2239                 args->out.status = rc;
2240         }
2241
2242         return rc;
2243 }
2244
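/*
 * hl_wait_for_fence - wait (or poll) on a single CS fence
 *
 * A zero timeout only polls the fence completion. The result is translated
 * into a hl_cs_wait_status and, if requested, the completion timestamp is
 * returned. An -ETIMEDOUT or -EIO error recorded in the fence is propagated
 * to the caller.
 */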
2245 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2246                                 enum hl_cs_wait_status *status, u64 timeout_us,
2247                                 s64 *timestamp)
2248 {
2249         struct hl_device *hdev = ctx->hdev;
2250         long completion_rc;
2251         int rc = 0;
2252
2253         if (IS_ERR(fence)) {
2254                 rc = PTR_ERR(fence);
2255                 if (rc == -EINVAL)
2256                         dev_notice_ratelimited(hdev->dev,
2257                                 "Can't wait on CS %llu because current CS is at seq %llu\n",
2258                                 seq, ctx->cs_sequence);
2259                 return rc;
2260         }
2261
2262         if (!fence) {
2263                 dev_dbg(hdev->dev,
2264                         "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2265                                 seq, ctx->cs_sequence);
2266
2267                 *status = CS_WAIT_STATUS_GONE;
2268                 return 0;
2269         }
2270
2271         if (!timeout_us) {
2272                 completion_rc = completion_done(&fence->completion);
2273         } else {
2274                 unsigned long timeout;
2275
2276                 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2277                                 timeout_us : usecs_to_jiffies(timeout_us);
2278                 completion_rc =
2279                         wait_for_completion_interruptible_timeout(
2280                                 &fence->completion, timeout);
2281         }
2282
2283         if (completion_rc > 0) {
2284                 *status = CS_WAIT_STATUS_COMPLETED;
2285                 if (timestamp)
2286                         *timestamp = ktime_to_ns(fence->timestamp);
2287         } else {
2288                 *status = CS_WAIT_STATUS_BUSY;
2289         }
2290
2291         if (fence->error == -ETIMEDOUT)
2292                 rc = -ETIMEDOUT;
2293         else if (fence->error == -EIO)
2294                 rc = -EIO;
2295
2296         return rc;
2297 }
2298
2299 /*
2300  * hl_cs_poll_fences - iterate CS fences to check for CS completion
2301  *
2302  * @mcs_data: multi-CS internal data
2303  *
2304  * @return 0 on success, otherwise non 0 error code
2305  *
2306  * The function iterates over all CS sequences in the list and sets a bit in
2307  * completion_bitmap for each completed CS.
2308  * While iterating, the function extracts the stream map to be later
2309  * used by the waiting function.
2310  * This function shall be called after taking the context ref.
2311  */
2312 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
2313 {
2314         struct hl_fence **fence_ptr = mcs_data->fence_arr;
2315         struct hl_device *hdev = mcs_data->ctx->hdev;
2316         int i, rc, arr_len = mcs_data->arr_len;
2317         u64 *seq_arr = mcs_data->seq_arr;
2318         ktime_t max_ktime, first_cs_time;
2319         enum hl_cs_wait_status status;
2320
2321         memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
2322
2323         /* get all fences under the same lock */
2324         rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2325         if (rc)
2326                 return rc;
2327
2328         /*
2329          * Set to the maximum time to verify the timestamp is valid: if at the
2330          * end this value is maintained, no timestamp was updated.
2331          */
2332         max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2333         first_cs_time = max_ktime;
2334
2335         for (i = 0; i < arr_len; i++, fence_ptr++) {
2336                 struct hl_fence *fence = *fence_ptr;
2337
2338                 /*
2339                  * function won't sleep as it is called with timeout 0 (i.e.
2340                  * poll the fence)
2341                  */
2342                 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
2343                                                 &status, 0, NULL);
2344                 if (rc) {
2345                         dev_err(hdev->dev,
2346                                 "wait_for_fence error :%d for CS seq %llu\n",
2347                                                                 rc, seq_arr[i]);
2348                         break;
2349                 }
2350
2351                 mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
2352
2353                 if (status == CS_WAIT_STATUS_BUSY)
2354                         continue;
2355
2356                 mcs_data->completion_bitmap |= BIT(i);
2357
2358                 /*
2359                  * Best effort to extract a timestamp. A few notes:
2360                  * - if even a single fence is gone we cannot extract a timestamp
2361                  *   (as the fence does not exist anymore)
2362                  * - for all completed CSs we take the earliest timestamp.
2363                  *   For this we have to validate that:
2364                  *       1. the given timestamp was indeed set
2365                  *       2. the timestamp is the earliest of all timestamps so far
2366                  */
2367
2368                 if (status == CS_WAIT_STATUS_GONE) {
2369                         mcs_data->update_ts = false;
2370                         mcs_data->gone_cs = true;
2371                 } else if (mcs_data->update_ts &&
2372                         (ktime_compare(fence->timestamp,
2373                                                 ktime_set(0, 0)) > 0) &&
2374                         (ktime_compare(fence->timestamp, first_cs_time) < 0)) {
2375                         first_cs_time = fence->timestamp;
2376                 }
2377         }
2378
2379         hl_fences_put(mcs_data->fence_arr, arr_len);
2380
2381         if (mcs_data->update_ts &&
2382                         (ktime_compare(first_cs_time, max_ktime) != 0))
2383                 mcs_data->timestamp = ktime_to_ns(first_cs_time);
2384
2385         return rc;
2386 }
2387
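/*
 * _hl_cs_wait_ioctl - wait on the fence of a single CS sequence
 *
 * Takes a context reference, looks up the fence of the given sequence and
 * waits on it via hl_wait_for_fence(), then releases the fence and the
 * context reference.
 */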
2388 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2389                                 u64 timeout_us, u64 seq,
2390                                 enum hl_cs_wait_status *status, s64 *timestamp)
2391 {
2392         struct hl_fence *fence;
2393         int rc = 0;
2394
2395         if (timestamp)
2396                 *timestamp = 0;
2397
2398         hl_ctx_get(hdev, ctx);
2399
2400         fence = hl_ctx_get_fence(ctx, seq);
2401
2402         rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2403         hl_fence_put(fence);
2404         hl_ctx_put(ctx);
2405
2406         return rc;
2407 }
2408
2409 /*
2410  * hl_wait_multi_cs_completion_init - init completion structure
2411  *
2412  * @hdev: pointer to habanalabs device structure
2413  * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
2414  *                        master QID to wait on
2415  *
2416  * @return valid completion struct pointer on success, otherwise error pointer
2417  *
2418  * Up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver.
2419  * The function gets the first available completion (by marking it "used")
2420  * and initializes its values.
2421  */
2422 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
2423                                                         struct hl_device *hdev,
2424                                                         u8 stream_master_bitmap)
2425 {
2426         struct multi_cs_completion *mcs_compl;
2427         int i;
2428
2429         /* find free multi_cs completion structure */
2430         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2431                 mcs_compl = &hdev->multi_cs_completion[i];
2432                 spin_lock(&mcs_compl->lock);
2433                 if (!mcs_compl->used) {
2434                         mcs_compl->used = 1;
2435                         mcs_compl->timestamp = 0;
2436                         mcs_compl->stream_master_qid_map = stream_master_bitmap;
2437                         reinit_completion(&mcs_compl->completion);
2438                         spin_unlock(&mcs_compl->lock);
2439                         break;
2440                 }
2441                 spin_unlock(&mcs_compl->lock);
2442         }
2443
2444         if (i == MULTI_CS_MAX_USER_CTX) {
2445                 dev_err(hdev->dev,
2446                                 "no available multi-CS completion structure\n");
2447                 return ERR_PTR(-ENOMEM);
2448         }
2449         return mcs_compl;
2450 }
2451
2452 /*
2453  * hl_wait_multi_cs_completion_fini - return completion structure and set as
2454  *                                    unused
2455  *
2456  * @mcs_compl: pointer to the completion structure
2457  */
2458 static void hl_wait_multi_cs_completion_fini(
2459                                         struct multi_cs_completion *mcs_compl)
2460 {
2461         /*
2462          * free completion structure, do it under lock to be in-sync with the
2463          * thread that signals completion
2464          */
2465         spin_lock(&mcs_compl->lock);
2466         mcs_compl->used = 0;
2467         spin_unlock(&mcs_compl->lock);
2468 }
2469
2470 /*
2471  * hl_wait_multi_cs_completion - wait for first CS to complete
2472  *
2473  * @mcs_data: multi-CS internal data
2474  *
2475  * @return 0 on success, otherwise non 0 error code
2476  */
2477 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
2478 {
2479         struct hl_device *hdev = mcs_data->ctx->hdev;
2480         struct multi_cs_completion *mcs_compl;
2481         long completion_rc;
2482
2483         mcs_compl = hl_wait_multi_cs_completion_init(hdev,
2484                                         mcs_data->stream_master_qid_map);
2485         if (IS_ERR(mcs_compl))
2486                 return PTR_ERR(mcs_compl);
2487
2488         completion_rc = wait_for_completion_interruptible_timeout(
2489                                         &mcs_compl->completion,
2490                                         usecs_to_jiffies(mcs_data->timeout_us));
2491
2492         /* update timestamp */
2493         if (completion_rc > 0)
2494                 mcs_data->timestamp = mcs_compl->timestamp;
2495
2496         hl_wait_multi_cs_completion_fini(mcs_compl);
2497
2498         mcs_data->wait_status = completion_rc;
2499
2500         return 0;
2501 }
2502
2503 /*
2504  * hl_multi_cs_completion_init - init array of multi-CS completion structures
2505  *
2506  * @hdev: pointer to habanalabs device structure
2507  */
2508 void hl_multi_cs_completion_init(struct hl_device *hdev)
2509 {
2510         struct multi_cs_completion *mcs_cmpl;
2511         int i;
2512
2513         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2514                 mcs_cmpl = &hdev->multi_cs_completion[i];
2515                 mcs_cmpl->used = 0;
2516                 spin_lock_init(&mcs_cmpl->lock);
2517                 init_completion(&mcs_cmpl->completion);
2518         }
2519 }
2520
2521 /*
2522  * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2523  *
2524  * @hpriv: pointer to the private data of the fd
2525  * @data: pointer to multi-CS wait ioctl in/out args
2526  *
2527  */
2528 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2529 {
2530         struct hl_device *hdev = hpriv->hdev;
2531         struct multi_cs_data mcs_data = {0};
2532         union hl_wait_cs_args *args = data;
2533         struct hl_ctx *ctx = hpriv->ctx;
2534         struct hl_fence **fence_arr;
2535         void __user *seq_arr;
2536         u32 size_to_copy;
2537         u64 *cs_seq_arr;
2538         u8 seq_arr_len;
2539         int rc;
2540
2541         if (!hdev->supports_wait_for_multi_cs) {
2542                 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2543                 return -EPERM;
2544         }
2545
2546         seq_arr_len = args->in.seq_arr_len;
2547
2548         if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2549                 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2550                                 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2551                 return -EINVAL;
2552         }
2553
2554         /* allocate memory for sequence array */
2555         cs_seq_arr =
2556                 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2557         if (!cs_seq_arr)
2558                 return -ENOMEM;
2559
2560         /* copy CS sequence array from user */
2561         seq_arr = (void __user *) (uintptr_t) args->in.seq;
2562         size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2563         if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2564                 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2565                 rc = -EFAULT;
2566                 goto free_seq_arr;
2567         }
2568
2569         /* allocate array for the fences */
2570         fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
2571         if (!fence_arr) {
2572                 rc = -ENOMEM;
2573                 goto free_seq_arr;
2574         }
2575
2576         /* initialize the multi-CS internal data */
2577         mcs_data.ctx = ctx;
2578         mcs_data.seq_arr = cs_seq_arr;
2579         mcs_data.fence_arr = fence_arr;
2580         mcs_data.arr_len = seq_arr_len;
2581
2582         hl_ctx_get(hdev, ctx);
2583
2584         /* poll all CS fences, extract timestamp */
2585         mcs_data.update_ts = true;
2586         rc = hl_cs_poll_fences(&mcs_data);
2587         /*
2588          * skip wait for CS completion when one of the below is true:
2589          * - an error on the poll function
2590          * - one or more CS in the list completed
2591          * - the user called ioctl with timeout 0
2592          */
2593         if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
2594                 goto put_ctx;
2595
2596         /* wait (with timeout) for the first CS to be completed */
2597         mcs_data.timeout_us = args->in.timeout_us;
2598         rc = hl_wait_multi_cs_completion(&mcs_data);
2599         if (rc)
2600                 goto put_ctx;
2601
2602         if (mcs_data.wait_status > 0) {
2603                 /*
2604                  * poll fences once again to update the CS map.
2605                  * no timestamp should be updated this time.
2606                  */
2607                 mcs_data.update_ts = false;
2608                 rc = hl_cs_poll_fences(&mcs_data);
2609
2610                 /*
2611                  * if hl_wait_multi_cs_completion returned before timeout (i.e.
2612                  * it got a completion) we expect to see at least one CS
2613                  * completed after the poll function.
2614                  */
2615                 if (!mcs_data.completion_bitmap) {
2616                         dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
2617                         rc = -EFAULT;
2618                 }
2619         }
2620
2621 put_ctx:
2622         hl_ctx_put(ctx);
2623         kfree(fence_arr);
2624
2625 free_seq_arr:
2626         kfree(cs_seq_arr);
2627
2628         /* update output args */
2629         memset(args, 0, sizeof(*args));
2630         if (rc)
2631                 return rc;
2632
2633         if (mcs_data.completion_bitmap) {
2634                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2635                 args->out.cs_completion_map = mcs_data.completion_bitmap;
2636
2637                 /* if timestamp not 0- it's valid */
2638                 if (mcs_data.timestamp) {
2639                         args->out.timestamp_nsec = mcs_data.timestamp;
2640                         args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2641                 }
2642
2643                 /* update if some CS was gone */
2644                 if (mcs_data.gone_cs)
2645                         args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2646         } else if (mcs_data.wait_status == -ERESTARTSYS) {
2647                 args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
2648         } else {
2649                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2650         }
2651
2652         return 0;
2653 }
2654
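/*
 * hl_cs_wait_ioctl - implementation of the single-CS wait ioctl
 *
 * Waits on the given CS sequence and converts the internal wait status and
 * errors (-ERESTARTSYS/-ETIMEDOUT/-EIO) into the uapi status codes returned
 * to the user.
 */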
2655 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2656 {
2657         struct hl_device *hdev = hpriv->hdev;
2658         union hl_wait_cs_args *args = data;
2659         enum hl_cs_wait_status status;
2660         u64 seq = args->in.seq;
2661         s64 timestamp;
2662         int rc;
2663
2664         rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
2665                                 &status, &timestamp);
2666
2667         memset(args, 0, sizeof(*args));
2668
2669         if (rc) {
2670                 if (rc == -ERESTARTSYS) {
2671                         dev_err_ratelimited(hdev->dev,
2672                                 "user process got signal while waiting for CS handle %llu\n",
2673                                 seq);
2674                         args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
2675                         rc = -EINTR;
2676                 } else if (rc == -ETIMEDOUT) {
2677                         dev_err_ratelimited(hdev->dev,
2678                                 "CS %llu has timed-out while user process is waiting for it\n",
2679                                 seq);
2680                         args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
2681                 } else if (rc == -EIO) {
2682                         dev_err_ratelimited(hdev->dev,
2683                                 "CS %llu has been aborted while user process is waiting for it\n",
2684                                 seq);
2685                         args->out.status = HL_WAIT_CS_STATUS_ABORTED;
2686                 }
2687                 return rc;
2688         }
2689
2690         if (timestamp) {
2691                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2692                 args->out.timestamp_nsec = timestamp;
2693         }
2694
2695         switch (status) {
2696         case CS_WAIT_STATUS_GONE:
2697                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2698                 fallthrough;
2699         case CS_WAIT_STATUS_COMPLETED:
2700                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2701                 break;
2702         case CS_WAIT_STATUS_BUSY:
2703         default:
2704                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2705                 break;
2706         }
2707
2708         return 0;
2709 }
2710
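/*
 * _hl_interrupt_wait_ioctl - wait for a user interrupt on a memory value
 *
 * Waits until the 32-bit value at the given user address reaches the target
 * value or the timeout expires. A pending node is added to the interrupt's
 * wait list; whenever the interrupt handler signals completion, the user
 * value is re-read and, if the comparison still fails, the wait continues
 * with the remaining timeout.
 */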
2711 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2712                                 u32 timeout_us, u64 user_address,
2713                                 u32 target_value, u16 interrupt_offset,
2714                                 enum hl_cs_wait_status *status)
2715 {
2716         struct hl_user_pending_interrupt *pend;
2717         struct hl_user_interrupt *interrupt;
2718         unsigned long timeout, flags;
2719         u32 completion_value;
2720         long completion_rc;
2721         int rc = 0;
2722
2723         if (timeout_us == U32_MAX)
2724                 timeout = timeout_us;
2725         else
2726                 timeout = usecs_to_jiffies(timeout_us);
2727
2728         hl_ctx_get(hdev, ctx);
2729
2730         pend = kmalloc(sizeof(*pend), GFP_KERNEL);
2731         if (!pend) {
2732                 hl_ctx_put(ctx);
2733                 return -ENOMEM;
2734         }
2735
        hl_fence_init(&pend->fence, ULONG_MAX);

        if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
                interrupt = &hdev->common_user_interrupt;
        else
                interrupt = &hdev->user_interrupt[interrupt_offset];

        if (copy_from_user(&completion_value, u64_to_user_ptr(user_address),
                                sizeof(completion_value))) {
                dev_err(hdev->dev, "Failed to copy completion value from user\n");
                rc = -EFAULT;
                goto free_fence;
        }

        if (completion_value >= target_value)
                *status = CS_WAIT_STATUS_COMPLETED;
        else
                *status = CS_WAIT_STATUS_BUSY;

        if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
                goto free_fence;

        /* Add the pending user interrupt to the relevant list for the
         * interrupt handler to monitor
         */
        spin_lock_irqsave(&interrupt->wait_list_lock, flags);
        list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
        spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);

wait_again:
        /* Wait for the interrupt handler to signal completion */
        completion_rc = wait_for_completion_interruptible_timeout(
                                        &pend->fence.completion, timeout);

        /* If the timeout did not expire, perform the comparison ourselves.
         * If the comparison fails, keep waiting until the timeout expires.
         */
        if (completion_rc > 0) {
                if (copy_from_user(&completion_value,
                                        u64_to_user_ptr(user_address),
                                        sizeof(completion_value))) {
                        dev_err(hdev->dev, "Failed to copy completion value from user\n");
                        rc = -EFAULT;

                        goto remove_pending_user_interrupt;
                }

                if (completion_value >= target_value) {
                        *status = CS_WAIT_STATUS_COMPLETED;
                } else {
                        spin_lock_irqsave(&interrupt->wait_list_lock, flags);
                        reinit_completion(&pend->fence.completion);
                        timeout = completion_rc;

                        spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
                        goto wait_again;
                }
        } else if (completion_rc == -ERESTARTSYS) {
                dev_err_ratelimited(hdev->dev,
                        "user process got signal while waiting for interrupt ID %d\n",
                        interrupt->interrupt_id);
                /* The caller only inspects *status when rc is 0, and the
                 * internal enum has no "interrupted" value, so report busy
                 * and return -EINTR.
                 */
                *status = CS_WAIT_STATUS_BUSY;
                rc = -EINTR;
        } else {
                *status = CS_WAIT_STATUS_BUSY;
        }

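        /* Remove the pending node under the wait-list lock, since the
         * interrupt handler may still be walking the list.
         */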
remove_pending_user_interrupt:
        spin_lock_irqsave(&interrupt->wait_list_lock, flags);
        list_del(&pend->wait_list_node);
        spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);

free_fence:
        kfree(pend);
        hl_ctx_put(ctx);

        return rc;
}

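/**
 * hl_interrupt_wait_ioctl() - handle the wait-for-user-interrupt flow
 * @hpriv: pointer to the private data of the file descriptor
 * @data: pointer to the ioctl arguments (union hl_wait_cs_args)
 *
 * Validate the requested user interrupt ID against the ASIC properties,
 * translate it to an interrupt offset and wait until the value at the given
 * user address reaches the target value (or the timeout expires).
 *
 * Return: 0 on success, negative error code otherwise.
 */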
static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
        u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
        struct hl_device *hdev = hpriv->hdev;
        struct asic_fixed_properties *prop;
        union hl_wait_cs_args *args = data;
        enum hl_cs_wait_status status;
        int rc;

        prop = &hdev->asic_prop;

        if (!prop->user_interrupt_count) {
                dev_err(hdev->dev, "no user interrupts allowed\n");
                return -EPERM;
        }

        interrupt_id =
                FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);

        first_interrupt = prop->first_available_user_msix_interrupt;
        last_interrupt = prop->first_available_user_msix_interrupt +
                                                prop->user_interrupt_count - 1;

        if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
                        interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
                dev_err(hdev->dev, "invalid user interrupt %u\n", interrupt_id);
                return -EINVAL;
        }

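        /* The common user interrupt is passed through as a sentinel offset,
         * which _hl_interrupt_wait_ioctl() maps to hdev->common_user_interrupt.
         */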
        if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
                interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
        else
                interrupt_offset = interrupt_id - first_interrupt;

        rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
                                args->in.interrupt_timeout_us, args->in.addr,
                                args->in.target, interrupt_offset, &status);

        memset(args, 0, sizeof(*args));

        if (rc) {
                if (rc != -EINTR)
                        dev_err_ratelimited(hdev->dev,
                                "interrupt_wait_ioctl failed (%d)\n", rc);

                return rc;
        }

        switch (status) {
        case CS_WAIT_STATUS_COMPLETED:
                args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
                break;
        case CS_WAIT_STATUS_BUSY:
        default:
                args->out.status = HL_WAIT_CS_STATUS_BUSY;
                break;
        }

        return 0;
}

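/**
 * hl_wait_ioctl() - entry point of the wait ioctl
 * @hpriv: pointer to the private data of the file descriptor
 * @data: pointer to the ioctl arguments (union hl_wait_cs_args)
 *
 * Dispatch the request according to the flags: wait for a user interrupt,
 * for multiple command submissions, or for a single command submission.
 *
 * Return: 0 on success, negative error code otherwise.
 */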
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
        union hl_wait_cs_args *args = data;
        u32 flags = args->in.flags;
        int rc;

        /* If the device is not operational, no point in waiting for any
         * command submission or user interrupt
         */
        if (!hl_device_operational(hpriv->hdev, NULL))
                return -EPERM;

        if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
                rc = hl_interrupt_wait_ioctl(hpriv, data);
        else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
                rc = hl_multi_cs_wait_ioctl(hpriv, data);
        else
                rc = hl_cs_wait_ioctl(hpriv, data);

        return rc;
}
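
/*
 * Illustrative sketch (not part of the driver): roughly how a user-space
 * process could drive the interrupt-wait path above, assuming the uapi
 * definitions from <misc/habanalabs.h> (union hl_wait_cs_args, the
 * HL_WAIT_CS_* flags and statuses) and the HL_IOCTL_WAIT_CS request code.
 * The hypothetical pack_interrupt_id() helper stands for encoding the
 * interrupt ID into the HL_WAIT_CS_FLAGS_INTERRUPT_MASK field of the flags
 * word.
 *
 *	volatile __u32 fence_value = 0;		// memory the device updates
 *	union hl_wait_cs_args args = {};
 *
 *	args.in.flags = HL_WAIT_CS_FLAGS_INTERRUPT | pack_interrupt_id(irq_id);
 *	args.in.addr = (__u64)(uintptr_t)&fence_value;
 *	args.in.target = target_value;
 *	args.in.interrupt_timeout_us = 1000000;	// give up after one second
 *
 *	rc = ioctl(fd, HL_IOCTL_WAIT_CS, &args);
 *	if (!rc && args.out.status == HL_WAIT_CS_STATUS_COMPLETED)
 *		... fence_value has reached target_value ...
 */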