Merge tag 'platform-drivers-x86-surface-aggregator-v5.13-1' of git://git.kernel.org...
[linux-2.6-microblaze.git] / drivers / misc / habanalabs / common / command_submission.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2019 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13
14 #define HL_CS_FLAGS_TYPE_MASK   (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15                                 HL_CS_FLAGS_COLLECTIVE_WAIT)
16
/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 *
 * Reported through the 'status' out-parameter of _hl_cs_wait_ioctl().
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};
28
29 static void job_wq_completion(struct work_struct *work);
30 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
31                                 u64 timeout_us, u64 seq,
32                                 enum hl_cs_wait_status *status, s64 *timestamp);
33 static void cs_do_release(struct kref *ref);
34
/*
 * hl_sob_reset - kref release callback that resets a H/W sync object
 *
 * @ref: kref member embedded in the hl_hw_sob whose refcount reached zero
 *
 * Delegates the actual reset to the ASIC-specific reset_sob() handler.
 */
static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	hdev->asic_funcs->reset_sob(hdev, hw_sob);
}
43
/*
 * hl_sob_reset_error - kref release callback used where a sob release is a bug
 *
 * @ref: kref member embedded in the hl_hw_sob whose refcount reached zero
 *
 * Unlike hl_sob_reset(), this does not reset the sob; it only logs a critical
 * error, since reaching refcount zero through this path is unexpected.
 */
void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
		hw_sob->q_idx, hw_sob->sob_id);
}
54
55 /**
56  * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
57  * @sob_base: sob base id
58  * @sob_mask: sob user mask, each bit represents a sob offset from sob base
59  * @mask: generated mask
60  *
61  * Return: 0 if given parameters are valid
62  */
63 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
64 {
65         int i;
66
67         if (sob_mask == 0)
68                 return -EINVAL;
69
70         if (sob_mask == 0x1) {
71                 *mask = ~(1 << (sob_base & 0x7));
72         } else {
73                 /* find msb in order to verify sob range is valid */
74                 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
75                         if (BIT(i) & sob_mask)
76                                 break;
77
78                 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
79                         return -EINVAL;
80
81                 *mask = ~sob_mask;
82         }
83
84         return 0;
85 }
86
/*
 * hl_fence_release - kref release callback that frees a CS completion fence
 *
 * @kref: refcount member embedded in the hl_fence being released
 *
 * For signal/wait/collective-wait CS types, also drops the reference that
 * was taken on the associated hw_sob. Finally frees the enclosing
 * hl_cs_compl object.
 */
static void hl_fence_release(struct kref *kref)
{
	struct hl_fence *fence =
		container_of(kref, struct hl_fence, refcount);
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);
	struct hl_device *hdev = hl_cs_cmpl->hdev;

	/* EBUSY means the CS was never submitted and hence we don't have
	 * an attached hw_sob object that we should handle here
	 */
	if (fence->error == -EBUSY)
		goto free;

	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
		(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
		(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {

		dev_dbg(hdev->dev,
			"CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
			hl_cs_cmpl->cs_seq,
			hl_cs_cmpl->type,
			hl_cs_cmpl->hw_sob->sob_id,
			hl_cs_cmpl->sob_val);

		/*
		 * A signal CS can get completion while the corresponding wait
		 * for signal CS is on its way to the PQ. The wait for signal CS
		 * will get stuck if the signal CS incremented the SOB to its
		 * max value and there are no pending (submitted) waits on this
		 * SOB.
		 * We do the following to void this situation:
		 * 1. The wait for signal CS must get a ref for the signal CS as
		 *    soon as possible in cs_ioctl_signal_wait() and put it
		 *    before being submitted to the PQ but after it incremented
		 *    the SOB refcnt in init_signal_wait_cs().
		 * 2. Signal/Wait for signal CS will decrement the SOB refcnt
		 *    here.
		 * These two measures guarantee that the wait for signal CS will
		 * reset the SOB upon completion rather than the signal CS and
		 * hence the above scenario is avoided.
		 */
		kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);

		/* Collective waits additionally reset the whole sob group */
		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
			hdev->asic_funcs->reset_sob_group(hdev,
					hl_cs_cmpl->sob_group);
	}

free:
	kfree(hl_cs_cmpl);
}
139
140 void hl_fence_put(struct hl_fence *fence)
141 {
142         if (fence)
143                 kref_put(&fence->refcount, hl_fence_release);
144 }
145
146 void hl_fence_get(struct hl_fence *fence)
147 {
148         if (fence)
149                 kref_get(&fence->refcount);
150 }
151
/*
 * hl_fence_init - initialize a fence for a new CS
 *
 * @fence: fence to initialize
 * @sequence: CS sequence number this fence tracks
 *
 * Sets refcount to 1, clears error/timestamp and arms the completion.
 */
static void hl_fence_init(struct hl_fence *fence, u64 sequence)
{
	kref_init(&fence->refcount);
	fence->cs_sequence = sequence;
	fence->error = 0;
	fence->timestamp = ktime_set(0, 0);
	init_completion(&fence->completion);
}
160
/* Take a reference on a CS */
void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}
165
/* Take a reference on a CS only if it is still alive; returns non-zero on
 * success, 0 if the refcount already dropped to zero
 */
static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}
170
/* Drop a reference on a CS; releases it via cs_do_release() on the last put */
static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}
175
/* kref release callback for a CS job - simply frees the job object */
static void cs_job_do_release(struct kref *ref)
{
	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);

	kfree(job);
}
182
/* Drop a reference on a CS job; frees it on the last put */
static void cs_job_put(struct hl_cs_job *job)
{
	kref_put(&job->refcount, cs_job_do_release);
}
187
188 bool cs_needs_completion(struct hl_cs *cs)
189 {
190         /* In case this is a staged CS, only the last CS in sequence should
191          * get a completion, any non staged CS will always get a completion
192          */
193         if (cs->staged_cs && !cs->staged_last)
194                 return false;
195
196         return true;
197 }
198
199 bool cs_needs_timeout(struct hl_cs *cs)
200 {
201         /* In case this is a staged CS, only the first CS in sequence should
202          * get a timeout, any non staged CS will always get a timeout
203          */
204         if (cs->staged_cs && !cs->staged_first)
205                 return false;
206
207         return true;
208 }
209
210 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
211 {
212         /*
213          * Patched CB is created for external queues jobs, and for H/W queues
214          * jobs if the user CB was allocated by driver and MMU is disabled.
215          */
216         return (job->queue_type == QUEUE_TYPE_EXT ||
217                         (job->queue_type == QUEUE_TYPE_HW &&
218                                         job->is_kernel_allocated_cb &&
219                                         !hdev->mmu_enable));
220 }
221
/*
 * cs_parser - parse the user command submission
 *
 * @hpriv      : pointer to the private data of the fd
 * @job        : pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 * On success for a patched-CB job, takes a cs_cnt reference on the patched CB.
 * Whether parsing succeeds or not, the original user CB of a patched-CB job is
 * released here and job->user_cb is set to NULL.
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;
	parser.completion = cs_needs_completion(job->cs);

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;
			job->contains_dma_pkt = parser.contains_dma_pkt;
			atomic_inc(&job->patched_cb->cs_cnt);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}
277
/*
 * complete_job - release all resources held by a finished job
 *
 * @hdev: pointer to device structure
 * @job: job to complete
 *
 * Releases patched/user CBs as appropriate for the queue type, removes the
 * job from the CS job list and from debugfs, conditionally drops the CS
 * reference taken for completion, and finally drops the job reference.
 */
static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			atomic_dec(&job->patched_cb->cs_cnt);
			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here.
	 * This is also true for INT queues jobs which were allocated by driver
	 */
	if (job->is_kernel_allocated_cb &&
		((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
				job->queue_type == QUEUE_TYPE_INT)) {
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	/* We decrement reference only for a CS that gets completion
	 * because the reference was incremented only for this kind of CS
	 * right before it was scheduled.
	 *
	 * In staged submission, only the last CS marked as 'staged_last'
	 * gets completion, hence its release function will be called from here.
	 * As for all the rest CS's in the staged submission which do not get
	 * completion, their CS reference will be decremented by the
	 * 'staged_last' CS during the CS release flow.
	 * All relevant PQ CI counters will be incremented during the CS release
	 * flow by calling 'hl_hw_queue_update_ci'.
	 */
	if (cs_needs_completion(cs) &&
		(job->queue_type == QUEUE_TYPE_EXT ||
			job->queue_type == QUEUE_TYPE_HW))
		cs_put(cs);

	cs_job_put(job);
}
336
337 /*
338  * hl_staged_cs_find_first - locate the first CS in this staged submission
339  *
340  * @hdev: pointer to device structure
341  * @cs_seq: staged submission sequence number
342  *
343  * @note: This function must be called under 'hdev->cs_mirror_lock'
344  *
345  * Find and return a CS pointer with the given sequence
346  */
347 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
348 {
349         struct hl_cs *cs;
350
351         list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
352                 if (cs->staged_cs && cs->staged_first &&
353                                 cs->sequence == cs_seq)
354                         return cs;
355
356         return NULL;
357 }
358
359 /*
360  * is_staged_cs_last_exists - returns true if the last CS in sequence exists
361  *
362  * @hdev: pointer to device structure
363  * @cs: staged submission member
364  *
365  */
366 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
367 {
368         struct hl_cs *last_entry;
369
370         last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
371                                                                 staged_cs_node);
372
373         if (last_entry->staged_last)
374                 return true;
375
376         return false;
377 }
378
379 /*
380  * staged_cs_get - get CS reference if this CS is a part of a staged CS
381  *
382  * @hdev: pointer to device structure
383  * @cs: current CS
384  * @cs_seq: staged submission sequence number
385  *
386  * Increment CS reference for every CS in this staged submission except for
387  * the CS which get completion.
388  */
389 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
390 {
391         /* Only the last CS in this staged submission will get a completion.
392          * We must increment the reference for all other CS's in this
393          * staged submission.
394          * Once we get a completion we will release the whole staged submission.
395          */
396         if (!cs->staged_last)
397                 cs_get(cs);
398 }
399
/*
 * staged_cs_put - put a CS in case it is part of staged submission
 *
 * @hdev: pointer to device structure
 * @cs: CS to put
 *
 * Drops the reference taken in staged_cs_get(); the completion CS never had
 * its reference incremented there, so it is skipped.
 */
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
	if (cs_needs_completion(cs))
		return;

	cs_put(cs);
}
416
/*
 * cs_handle_tdr - cancel this CS's TDR work and arm the TDR for the next CS
 *
 * @hdev: pointer to device structure
 * @cs: the completing CS
 *
 * Only a CS that owns a timeout (see cs_needs_timeout()) is handled. For a
 * staged submission, the TDR belongs to the 'staged_first' CS, which is
 * looked up under the mirror lock.
 */
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
	bool next_entry_found = false;
	struct hl_cs *next;

	if (!cs_needs_timeout(cs))
		return;

	spin_lock(&hdev->cs_mirror_lock);

	/* We need to handle tdr only once for the complete staged submission.
	 * Hence, we choose the CS that reaches this function first which is
	 * the CS marked as 'staged_last'.
	 */
	if (cs->staged_cs && cs->staged_last)
		cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);

	spin_unlock(&hdev->cs_mirror_lock);

	/* Don't cancel TDR in case this CS was timedout because we might be
	 * running from the TDR context
	 */
	if (cs && (cs->timedout ||
			hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
		return;

	/* cancel_delayed_work_sync() may sleep; called outside the lock */
	if (cs && cs->tdr_active)
		cancel_delayed_work_sync(&cs->work_tdr);

	spin_lock(&hdev->cs_mirror_lock);

	/* queue TDR for next CS */
	list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
		if (cs_needs_timeout(next)) {
			next_entry_found = true;
			break;
		}

	if (next_entry_found && !next->tdr_active) {
		next->tdr_active = true;
		schedule_delayed_work(&next->work_tdr,
					hdev->timeout_jiffies);
	}

	spin_unlock(&hdev->cs_mirror_lock);
}
463
/*
 * cs_do_release - kref release callback that tears down a CS
 *
 * @ref: refcount member embedded in the CS being released
 *
 * Completes all remaining jobs, updates the busy/idle accounting, removes
 * the CS from the mirror list, handles the TDR handover, releases staged
 * submission members, signals the fence and frees the CS.
 */
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;

	cs->completed = true;

	/*
	 * Although if we reached here it means that all external jobs have
	 * finished, because each one of them took refcnt to CS, we still
	 * need to go over the internal jobs and complete them. Otherwise, we
	 * will have leaked memory and what's worse, the CS object (and
	 * potentially the CTX object) could be released, while the JOB
	 * still holds a pointer to them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);

	if (!cs->submitted) {
		/* In case the wait for signal CS was submitted, the put occurs
		 * in init_signal_wait_cs() or collective_wait_init_cs()
		 * right before hanging on the PQ.
		 */
		if (cs->type == CS_TYPE_WAIT ||
				cs->type == CS_TYPE_COLLECTIVE_WAIT)
			hl_fence_put(cs->signal_fence);

		goto out;
	}

	hdev->asic_funcs->hw_queues_lock(hdev);

	/* Record a busy-to-idle timestamp when the last active CS retires */
	hdev->cs_active_cnt--;
	if (!hdev->cs_active_cnt) {
		struct hl_device_idle_busy_ts *ts;

		ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
		ts->busy_to_idle_ts = ktime_get();

		/* idle_busy_ts_arr is a circular buffer - wrap the index */
		if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
			hdev->idle_busy_ts_idx = 0;
	} else if (hdev->cs_active_cnt < 0) {
		dev_crit(hdev->dev, "CS active cnt %d is negative\n",
			hdev->cs_active_cnt);
	}

	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Need to update CI for all queue jobs that does not get completion */
	hl_hw_queue_update_ci(cs);

	/* remove CS from CS mirror list */
	spin_lock(&hdev->cs_mirror_lock);
	list_del_init(&cs->mirror_node);
	spin_unlock(&hdev->cs_mirror_lock);

	cs_handle_tdr(hdev, cs);

	if (cs->staged_cs) {
		/* the completion CS decrements reference for the entire
		 * staged submission
		 */
		if (cs->staged_last) {
			struct hl_cs *staged_cs, *tmp;

			list_for_each_entry_safe(staged_cs, tmp,
					&cs->staged_cs_node, staged_cs_node)
				staged_cs_put(hdev, staged_cs);
		}

		/* A staged CS will be a member in the list only after it
		 * was submitted. We used 'cs_mirror_lock' when inserting
		 * it to list so we will use it again when removing it
		 */
		if (cs->submitted) {
			spin_lock(&hdev->cs_mirror_lock);
			list_del(&cs->staged_cs_node);
			spin_unlock(&hdev->cs_mirror_lock);
		}
	}

out:
	/* Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	/* We need to mark an error for not submitted because in that case
	 * the hl fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
		cs->fence->error = -ETIMEDOUT;
	else if (cs->aborted)
		cs->fence->error = -EIO;
	else if (!cs->submitted)
		cs->fence->error = -EBUSY;

	if (cs->timestamp)
		cs->fence->timestamp = ktime_get();
	complete_all(&cs->fence->completion);
	hl_fence_put(cs->fence);

	kfree(cs->jobs_in_queue_cnt);
	kfree(cs);
}
573
/*
 * cs_timedout - TDR (timeout detection and recovery) work handler for a CS
 *
 * @work: delayed work member embedded in the CS (work_tdr)
 *
 * Marks the CS as timed out, logs an error per CS type and, depending on
 * hdev->reset_on_lockup, either resets the device or flags it for reset.
 */
static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						 work_tdr.work);
	/* Bail out if the CS is already being released */
	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark the CS is timed out so we won't try to cancel its TDR */
	cs->timedout = true;

	hdev = cs->ctx->hdev;

	switch (cs->type) {
	case CS_TYPE_SIGNAL:
		dev_err(hdev->dev,
			"Signal command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_WAIT:
		dev_err(hdev->dev,
			"Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_COLLECTIVE_WAIT:
		dev_err(hdev->dev,
			"Collective Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	default:
		dev_err(hdev->dev,
			"Command submission %llu has not finished in time!\n",
			cs->sequence);
		break;
	}

	cs_put(cs);

	if (hdev->reset_on_lockup)
		hl_device_reset(hdev, false, false);
	else
		hdev->needs_reset = true;
}
627
/*
 * allocate_cs - allocate and initialize a new CS object and its fence
 *
 * @hdev: pointer to device structure
 * @ctx: context this CS belongs to (its refcount is incremented on success)
 * @cs_type: type of the CS (default/signal/wait/collective wait)
 * @user_sequence: sequence supplied by the user, used for staged submission
 *                 deadlock detection
 * @cs_new: out parameter, the newly allocated CS
 *
 * Reserves a slot in the context's cs_pending array; fails with -EAGAIN if
 * the slot is occupied by a fence that has not completed yet (too many
 * in-flight CS). Allocations use GFP_ATOMIC - presumably because callers may
 * hold spinlocks (TODO confirm against callers).
 *
 * Return: 0 on success, -ENOMEM / -EAGAIN on failure.
 */
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, u64 user_sequence,
			struct hl_cs **cs_new)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_fence *other = NULL;
	struct hl_cs_compl *cs_cmpl;
	struct hl_cs *cs;
	int rc;

	cntr = &hdev->aggregated_cs_counters;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, ctx);

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	cs->type = cs_type;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs;
	}

	cs_cmpl->hdev = hdev;
	cs_cmpl->type = cs->type;
	spin_lock_init(&cs_cmpl->lock);
	cs->fence = &cs_cmpl->base_fence;

	spin_lock(&ctx->cs_lock);

	/* cs_pending is a power-of-2 sized circular array of fences */
	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];

	if (other && !completion_done(&other->completion)) {
		/* If the following statement is true, it means we have reached
		 * a point in which only part of the staged submission was
		 * submitted and we don't have enough room in the 'cs_pending'
		 * array for the rest of the submission.
		 * This causes a deadlock because this CS will never be
		 * completed as it depends on future CS's for completion.
		 */
		if (other->cs_sequence == user_sequence)
			dev_crit_ratelimited(hdev->dev,
				"Staged CS %llu deadlock due to lack of resources",
				user_sequence);

		dev_dbg_ratelimited(hdev->dev,
			"Rejecting CS because of too many in-flights CS\n");
		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
		rc = -EAGAIN;
		goto free_fence;
	}

	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
	if (!cs->jobs_in_queue_cnt) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_fence;
	}

	/* init hl_fence */
	hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);

	cs->sequence = cs_cmpl->cs_seq;

	ctx->cs_pending[cs_cmpl->cs_seq &
			(hdev->asic_prop.max_pending_cs - 1)] =
							&cs_cmpl->base_fence;
	ctx->cs_sequence++;

	hl_fence_get(&cs_cmpl->base_fence);

	/* drop the reference to the fence we just evicted from the slot */
	hl_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	spin_unlock(&ctx->cs_lock);
	kfree(cs_cmpl);
free_cs:
	kfree(cs);
	hl_ctx_put(ctx);
	return rc;
}
736
/*
 * cs_rollback - undo a CS that will never run to completion
 *
 * @hdev: pointer to device structure
 * @cs: CS to roll back
 *
 * Drops the staged-submission reference (if any) and completes every job
 * still attached to the CS so its resources are released.
 */
static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	staged_cs_put(hdev, cs);

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);
}
746
/*
 * hl_cs_rollback_all - abort and roll back every CS still on the mirror list
 *
 * @hdev: pointer to device structure
 *
 * Used in teardown/reset flows; marks each leftover CS as aborted and
 * releases it.
 */
void hl_cs_rollback_all(struct hl_device *hdev)
{
	int i;
	struct hl_cs *cs, *tmp;

	/* flush all completions before iterating over the CS mirror list in
	 * order to avoid a race with the release functions
	 */
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		flush_workqueue(hdev->cq_wq[i]);

	/* Make sure we don't have leftovers in the CS mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
				cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}
}
768
769 void hl_pending_cb_list_flush(struct hl_ctx *ctx)
770 {
771         struct hl_pending_cb *pending_cb, *tmp;
772
773         list_for_each_entry_safe(pending_cb, tmp,
774                         &ctx->pending_cb_list, cb_node) {
775                 list_del(&pending_cb->cb_node);
776                 hl_cb_put(pending_cb->cb);
777                 kfree(pending_cb);
778         }
779 }
780
/*
 * job_wq_completion - workqueue handler that completes a finished job
 *
 * @work: work member embedded in the job (finish_work)
 */
static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	complete_job(hdev, job);
}
791
792 static int validate_queue_index(struct hl_device *hdev,
793                                 struct hl_cs_chunk *chunk,
794                                 enum hl_queue_type *queue_type,
795                                 bool *is_kernel_allocated_cb)
796 {
797         struct asic_fixed_properties *asic = &hdev->asic_prop;
798         struct hw_queue_properties *hw_queue_prop;
799
800         /* This must be checked here to prevent out-of-bounds access to
801          * hw_queues_props array
802          */
803         if (chunk->queue_index >= asic->max_queues) {
804                 dev_err(hdev->dev, "Queue index %d is invalid\n",
805                         chunk->queue_index);
806                 return -EINVAL;
807         }
808
809         hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
810
811         if (hw_queue_prop->type == QUEUE_TYPE_NA) {
812                 dev_err(hdev->dev, "Queue index %d is invalid\n",
813                         chunk->queue_index);
814                 return -EINVAL;
815         }
816
817         if (hw_queue_prop->driver_only) {
818                 dev_err(hdev->dev,
819                         "Queue index %d is restricted for the kernel driver\n",
820                         chunk->queue_index);
821                 return -EINVAL;
822         }
823
824         /* When hw queue type isn't QUEUE_TYPE_HW,
825          * USER_ALLOC_CB flag shall be referred as "don't care".
826          */
827         if (hw_queue_prop->type == QUEUE_TYPE_HW) {
828                 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
829                         if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
830                                 dev_err(hdev->dev,
831                                         "Queue index %d doesn't support user CB\n",
832                                         chunk->queue_index);
833                                 return -EINVAL;
834                         }
835
836                         *is_kernel_allocated_cb = false;
837                 } else {
838                         if (!(hw_queue_prop->cb_alloc_flags &
839                                         CB_ALLOC_KERNEL)) {
840                                 dev_err(hdev->dev,
841                                         "Queue index %d doesn't support kernel CB\n",
842                                         chunk->queue_index);
843                                 return -EINVAL;
844                         }
845
846                         *is_kernel_allocated_cb = true;
847                 }
848         } else {
849                 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
850                                                 & CB_ALLOC_KERNEL);
851         }
852
853         *queue_type = hw_queue_prop->type;
854         return 0;
855 }
856
857 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
858                                         struct hl_cb_mgr *cb_mgr,
859                                         struct hl_cs_chunk *chunk)
860 {
861         struct hl_cb *cb;
862         u32 cb_handle;
863
864         cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
865
866         cb = hl_cb_get(hdev, cb_mgr, cb_handle);
867         if (!cb) {
868                 dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
869                 return NULL;
870         }
871
872         if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
873                 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
874                 goto release_cb;
875         }
876
877         atomic_inc(&cb->cs_cnt);
878
879         return cb;
880
881 release_cb:
882         hl_cb_put(cb);
883         return NULL;
884 }
885
886 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
887                 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
888 {
889         struct hl_cs_job *job;
890
891         job = kzalloc(sizeof(*job), GFP_ATOMIC);
892         if (!job)
893                 return NULL;
894
895         kref_init(&job->refcount);
896         job->queue_type = queue_type;
897         job->is_kernel_allocated_cb = is_kernel_allocated_cb;
898
899         if (is_cb_patched(hdev, job))
900                 INIT_LIST_HEAD(&job->userptr_list);
901
902         if (job->queue_type == QUEUE_TYPE_EXT)
903                 INIT_WORK(&job->finish_work, job_wq_completion);
904
905         return job;
906 }
907
908 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
909 {
910         if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
911                 return CS_TYPE_SIGNAL;
912         else if (cs_type_flags & HL_CS_FLAGS_WAIT)
913                 return CS_TYPE_WAIT;
914         else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
915                 return CS_TYPE_COLLECTIVE_WAIT;
916         else
917                 return CS_TYPE_DEFAULT;
918 }
919
920 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
921 {
922         struct hl_device *hdev = hpriv->hdev;
923         struct hl_ctx *ctx = hpriv->ctx;
924         u32 cs_type_flags, num_chunks;
925         enum hl_device_status status;
926         enum hl_cs_type cs_type;
927
928         if (!hl_device_operational(hdev, &status)) {
929                 dev_warn_ratelimited(hdev->dev,
930                         "Device is %s. Can't submit new CS\n",
931                         hdev->status[status]);
932                 return -EBUSY;
933         }
934
935         if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
936                         !hdev->supports_staged_submission) {
937                 dev_err(hdev->dev, "staged submission not supported");
938                 return -EPERM;
939         }
940
941         cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
942
943         if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
944                 dev_err(hdev->dev,
945                         "CS type flags are mutually exclusive, context %d\n",
946                         ctx->asid);
947                 return -EINVAL;
948         }
949
950         cs_type = hl_cs_get_cs_type(cs_type_flags);
951         num_chunks = args->in.num_chunks_execute;
952
953         if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
954                                         !hdev->supports_sync_stream)) {
955                 dev_err(hdev->dev, "Sync stream CS is not supported\n");
956                 return -EINVAL;
957         }
958
959         if (cs_type == CS_TYPE_DEFAULT) {
960                 if (!num_chunks) {
961                         dev_err(hdev->dev,
962                                 "Got execute CS with 0 chunks, context %d\n",
963                                 ctx->asid);
964                         return -EINVAL;
965                 }
966         } else if (num_chunks != 1) {
967                 dev_err(hdev->dev,
968                         "Sync stream CS mandates one chunk only, context %d\n",
969                         ctx->asid);
970                 return -EINVAL;
971         }
972
973         return 0;
974 }
975
976 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
977                                         struct hl_cs_chunk **cs_chunk_array,
978                                         void __user *chunks, u32 num_chunks,
979                                         struct hl_ctx *ctx)
980 {
981         u32 size_to_copy;
982
983         if (num_chunks > HL_MAX_JOBS_PER_CS) {
984                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
985                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
986                 dev_err(hdev->dev,
987                         "Number of chunks can NOT be larger than %d\n",
988                         HL_MAX_JOBS_PER_CS);
989                 return -EINVAL;
990         }
991
992         *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
993                                         GFP_ATOMIC);
994         if (!*cs_chunk_array) {
995                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
996                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
997                 return -ENOMEM;
998         }
999
1000         size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1001         if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1002                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1003                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1004                 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1005                 kfree(*cs_chunk_array);
1006                 return -EFAULT;
1007         }
1008
1009         return 0;
1010 }
1011
1012 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1013                                 u64 sequence, u32 flags)
1014 {
1015         if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1016                 return 0;
1017
1018         cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1019         cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1020
1021         if (cs->staged_first) {
1022                 /* Staged CS sequence is the first CS sequence */
1023                 INIT_LIST_HEAD(&cs->staged_cs_node);
1024                 cs->staged_sequence = cs->sequence;
1025         } else {
1026                 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1027                  * under the cs_mirror_lock
1028                  */
1029                 cs->staged_sequence = sequence;
1030         }
1031
1032         /* Increment CS reference if needed */
1033         staged_cs_get(hdev, cs);
1034
1035         cs->staged_cs = true;
1036
1037         return 0;
1038 }
1039
/*
 * cs_ioctl_default() - handle submission of a default (execute) CS.
 * @hpriv: pointer to the file private data of the calling process.
 * @chunks: user-space pointer to the CS chunk array.
 * @num_chunks: number of chunks in the array.
 * @cs_seq: in: user-provided staged sequence; out: the new CS sequence,
 *          or ULLONG_MAX on failure.
 * @flags: the CS flags from the ioctl.
 *
 * Copies and validates all chunks, builds a job per chunk, parses the jobs
 * and schedules the CS on the H/W queues. On any failure the CS is rolled
 * back via the goto ladder at the bottom.
 *
 * Return: HL_CS_STATUS_SUCCESS (0) on success, negative errno otherwise.
 */
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
				u32 num_chunks, u64 *cs_seq, u32 flags)
{
	bool staged_mid, int_queues_only = true;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_counters_atomic *cntr;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	u64 user_sequence;
	int rc, i;

	cntr = &hdev->aggregated_cs_counters;
	user_sequence = *cs_seq;
	/* report ULLONG_MAX to the caller unless scheduling succeeds */
	*cs_seq = ULLONG_MAX;

	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
			hpriv->ctx);
	if (rc)
		goto out;

	/* a staged CS that is not the first of its set */
	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
		staged_mid = true;
	else
		staged_mid = false;

	/* mid/last staged CS reuses the user's staged sequence */
	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
			staged_mid ? user_sequence : ULLONG_MAX, &cs);
	if (rc)
		goto free_cs_chunk_array;

	cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	rc = cs_staged_submission(hdev, cs, user_sequence, flags);
	if (rc)
		goto free_cs_object;

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
						&is_kernel_allocated_cb);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			goto free_cs_object;
		}

		if (is_kernel_allocated_cb) {
			/* takes a CB reference and bumps its cs_cnt */
			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
			if (!cb) {
				atomic64_inc(
					&ctx->cs_counters.validation_drop_cnt);
				atomic64_inc(&cntr->validation_drop_cnt);
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			/*
			 * for queues not using kernel-allocated CBs, the
			 * user's cb_handle value is carried opaquely in the
			 * cb pointer instead of a real hl_cb object
			 */
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
			int_queues_only = false;

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
			atomic64_inc(&cntr->out_of_mem_drop_cnt);
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			/* only a real CB holds references to release */
			if (is_kernel_allocated_cb)
				goto release_cb;

			goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to user and free all its resources
		 * Only increment for JOB on external or H/W queues, because
		 * only for those JOBs we get completion
		 */
		if (cs_needs_completion(cs) &&
			(job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW))
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
			atomic64_inc(&cntr->parsing_drop_cnt);
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	/* We allow a CS with any queue type combination as long as it does
	 * not get a completion
	 */
	if (int_queues_only && cs_needs_completion(cs)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev,
			"Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	/* undo the references taken by get_cb_from_cs_chunk() */
	atomic_dec(&cb->cs_cnt);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}
1200
1201 static int pending_cb_create_job(struct hl_device *hdev, struct hl_ctx *ctx,
1202                 struct hl_cs *cs, struct hl_cb *cb, u32 size, u32 hw_queue_id)
1203 {
1204         struct hw_queue_properties *hw_queue_prop;
1205         struct hl_cs_counters_atomic *cntr;
1206         struct hl_cs_job *job;
1207
1208         hw_queue_prop = &hdev->asic_prop.hw_queues_props[hw_queue_id];
1209         cntr = &hdev->aggregated_cs_counters;
1210
1211         job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
1212         if (!job) {
1213                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1214                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1215                 dev_err(hdev->dev, "Failed to allocate a new job\n");
1216                 return -ENOMEM;
1217         }
1218
1219         job->id = 0;
1220         job->cs = cs;
1221         job->user_cb = cb;
1222         atomic_inc(&job->user_cb->cs_cnt);
1223         job->user_cb_size = size;
1224         job->hw_queue_id = hw_queue_id;
1225         job->patched_cb = job->user_cb;
1226         job->job_cb_size = job->user_cb_size;
1227
1228         /* increment refcount as for external queues we get completion */
1229         cs_get(cs);
1230
1231         cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1232
1233         list_add_tail(&job->cs_node, &cs->job_list);
1234
1235         hl_debugfs_add_job(hdev, job);
1236
1237         return 0;
1238 }
1239
/*
 * hl_submit_pending_cb() - flush the context's pending CB list as one CS.
 * @hpriv: pointer to the file private data of the calling process.
 *
 * Moves all queued pending CBs to a local list, wraps them in a new
 * default-type CS and schedules it. On failure, the CBs are returned to
 * the context's pending list in their original order so a later call can
 * retry. A single-entry token (thread_pending_cb_token) guarantees only
 * one thread processes the list at a time.
 *
 * Return: 0 on success or when there is nothing to do (including when
 * another thread holds the token), negative errno otherwise.
 */
static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_pending_cb *pending_cb, *tmp;
	struct list_head local_cb_list;
	struct hl_cs *cs;
	struct hl_cb *cb;
	u32 hw_queue_id;
	u32 cb_size;
	int process_list, rc = 0;

	/* fast path: nothing queued, no need to grab the token */
	if (list_empty(&ctx->pending_cb_list))
		return 0;

	process_list = atomic_cmpxchg(&ctx->thread_pending_cb_token, 1, 0);

	/* Only a single thread is allowed to process the list */
	if (!process_list)
		return 0;

	/* re-check: the list may have been drained while taking the token */
	if (list_empty(&ctx->pending_cb_list))
		goto free_pending_cb_token;

	/* move all list elements to a local list */
	INIT_LIST_HEAD(&local_cb_list);
	spin_lock(&ctx->pending_cb_lock);
	list_for_each_entry_safe(pending_cb, tmp, &ctx->pending_cb_list,
								cb_node)
		list_move_tail(&pending_cb->cb_node, &local_cb_list);
	spin_unlock(&ctx->pending_cb_lock);

	rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs);
	if (rc)
		goto add_list_elements;

	hl_debugfs_add_cs(cs);

	/* Iterate through pending cb list, create jobs and add to CS */
	list_for_each_entry(pending_cb, &local_cb_list, cb_node) {
		cb = pending_cb->cb;
		cb_size = pending_cb->cb_size;
		hw_queue_id = pending_cb->hw_queue_id;

		rc = pending_cb_create_job(hdev, ctx, cs, cb, cb_size,
								hw_queue_id);
		if (rc)
			goto free_cs_object;
	}

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu (%d)\n",
				ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	/* pending cb was scheduled successfully */
	list_for_each_entry_safe(pending_cb, tmp, &local_cb_list, cb_node) {
		list_del(&pending_cb->cb_node);
		kfree(pending_cb);
	}

	cs_put(cs);

	goto free_pending_cb_token;

free_cs_object:
	cs_rollback(hdev, cs);
	cs_put(cs);
add_list_elements:
	/* return the CBs to the pending list, preserving original order */
	spin_lock(&ctx->pending_cb_lock);
	list_for_each_entry_safe_reverse(pending_cb, tmp, &local_cb_list,
								cb_node)
		list_move(&pending_cb->cb_node, &ctx->pending_cb_list);
	spin_unlock(&ctx->pending_cb_lock);
free_pending_cb_token:
	/* release the single-entry token for the next caller */
	atomic_set(&ctx->thread_pending_cb_token, 1);

	return rc;
}
1323
/*
 * hl_cs_ctx_switch() - perform context switch and run the restore phase.
 * @hpriv: pointer to the file private data of the calling process.
 * @args: the CS ioctl arguments.
 * @cs_seq: out parameter; receives the restore CS sequence if one is
 *          submitted.
 *
 * The first CS of a context (or any CS with HL_CS_FLAGS_FORCE_RESTORE)
 * triggers an ASIC context switch followed by a restore-phase CS built
 * from the user's restore chunks. Other threads that race with the
 * switching thread poll thread_ctx_switch_wait_token until the switch is
 * done. A failed context switch with -ETIMEDOUT/-EBUSY schedules a soft
 * reset, which is deferred to the end of the function to avoid deadlock.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
				u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	bool need_soft_reset = false;
	int rc = 0, do_ctx_switch;
	void __user *chunks;
	u32 num_chunks, tmp;
	int ret;

	/* only the thread that wins the token performs the switch */
	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timedout, or if the device is not IDLE
				 * while we want to do context-switch (-EBUSY),
				 * we need to soft-reset because QMAN is
				 * probably stuck. However, we can't call to
				 * reset here directly because of deadlock, so
				 * need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks = args->in.num_chunks_restore;

		if (!num_chunks) {
			/* an empty restore CS is legal - nothing to submit */
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
								cs_seq, 0);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks) {
			enum hl_cs_wait_status status;
wait_again:
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					*cs_seq, &status, NULL);
			if (ret) {
				/* the wait is not interruptible here - retry */
				if (ret == -ERESTARTSYS) {
					usleep_range(100, 200);
					goto wait_again;
				}

				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %d\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		/* let other threads know the switch is complete */
		ctx->thread_ctx_switch_wait_token = 1;

	} else if (!ctx->thread_ctx_switch_wait_token) {
		/* another thread is switching - poll until it finishes */
		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

out:
	/* deferred soft reset after a stuck context switch (see above) */
	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
		hl_device_reset(hdev, false, false);

	return rc;
}
1426
1427 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1428                 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
1429 {
1430         u64 *signal_seq_arr = NULL;
1431         u32 size_to_copy, signal_seq_arr_len;
1432         int rc = 0;
1433
1434         signal_seq_arr_len = chunk->num_signal_seq_arr;
1435
1436         /* currently only one signal seq is supported */
1437         if (signal_seq_arr_len != 1) {
1438                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1439                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1440                 dev_err(hdev->dev,
1441                         "Wait for signal CS supports only one signal CS seq\n");
1442                 return -EINVAL;
1443         }
1444
1445         signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1446                                         sizeof(*signal_seq_arr),
1447                                         GFP_ATOMIC);
1448         if (!signal_seq_arr) {
1449                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1450                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1451                 return -ENOMEM;
1452         }
1453
1454         size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
1455         if (copy_from_user(signal_seq_arr,
1456                                 u64_to_user_ptr(chunk->signal_seq_arr),
1457                                 size_to_copy)) {
1458                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1459                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1460                 dev_err(hdev->dev,
1461                         "Failed to copy signal seq array from user\n");
1462                 rc = -EFAULT;
1463                 goto out;
1464         }
1465
1466         /* currently it is guaranteed to have only one signal seq */
1467         *signal_seq = signal_seq_arr[0];
1468
1469 out:
1470         kfree(signal_seq_arr);
1471
1472         return rc;
1473 }
1474
1475 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1476                 struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
1477                 u32 q_idx)
1478 {
1479         struct hl_cs_counters_atomic *cntr;
1480         struct hl_cs_job *job;
1481         struct hl_cb *cb;
1482         u32 cb_size;
1483
1484         cntr = &hdev->aggregated_cs_counters;
1485
1486         job = hl_cs_allocate_job(hdev, q_type, true);
1487         if (!job) {
1488                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1489                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1490                 dev_err(hdev->dev, "Failed to allocate a new job\n");
1491                 return -ENOMEM;
1492         }
1493
1494         if (cs->type == CS_TYPE_WAIT)
1495                 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1496         else
1497                 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1498
1499         cb = hl_cb_kernel_create(hdev, cb_size,
1500                                 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1501         if (!cb) {
1502                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1503                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1504                 kfree(job);
1505                 return -EFAULT;
1506         }
1507
1508         job->id = 0;
1509         job->cs = cs;
1510         job->user_cb = cb;
1511         atomic_inc(&job->user_cb->cs_cnt);
1512         job->user_cb_size = cb_size;
1513         job->hw_queue_id = q_idx;
1514
1515         /*
1516          * No need in parsing, user CB is the patched CB.
1517          * We call hl_cb_destroy() out of two reasons - we don't need the CB in
1518          * the CB idr anymore and to decrement its refcount as it was
1519          * incremented inside hl_cb_kernel_create().
1520          */
1521         job->patched_cb = job->user_cb;
1522         job->job_cb_size = job->user_cb_size;
1523         hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
1524
1525         /* increment refcount as for external queues we get completion */
1526         cs_get(cs);
1527
1528         cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1529
1530         list_add_tail(&job->cs_node, &cs->job_list);
1531
1532         hl_debugfs_add_job(hdev, job);
1533
1534         return 0;
1535 }
1536
1537 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
1538                                 void __user *chunks, u32 num_chunks,
1539                                 u64 *cs_seq, bool timestamp)
1540 {
1541         struct hl_cs_chunk *cs_chunk_array, *chunk;
1542         struct hw_queue_properties *hw_queue_prop;
1543         struct hl_device *hdev = hpriv->hdev;
1544         struct hl_cs_compl *sig_waitcs_cmpl;
1545         u32 q_idx, collective_engine_id = 0;
1546         struct hl_cs_counters_atomic *cntr;
1547         struct hl_fence *sig_fence = NULL;
1548         struct hl_ctx *ctx = hpriv->ctx;
1549         enum hl_queue_type q_type;
1550         struct hl_cs *cs;
1551         u64 signal_seq;
1552         int rc;
1553
1554         cntr = &hdev->aggregated_cs_counters;
1555         *cs_seq = ULLONG_MAX;
1556
1557         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1558                         ctx);
1559         if (rc)
1560                 goto out;
1561
1562         /* currently it is guaranteed to have only one chunk */
1563         chunk = &cs_chunk_array[0];
1564
1565         if (chunk->queue_index >= hdev->asic_prop.max_queues) {
1566                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1567                 atomic64_inc(&cntr->validation_drop_cnt);
1568                 dev_err(hdev->dev, "Queue index %d is invalid\n",
1569                         chunk->queue_index);
1570                 rc = -EINVAL;
1571                 goto free_cs_chunk_array;
1572         }
1573
1574         q_idx = chunk->queue_index;
1575         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1576         q_type = hw_queue_prop->type;
1577
1578         if (!hw_queue_prop->supports_sync_stream) {
1579                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1580                 atomic64_inc(&cntr->validation_drop_cnt);
1581                 dev_err(hdev->dev,
1582                         "Queue index %d does not support sync stream operations\n",
1583                         q_idx);
1584                 rc = -EINVAL;
1585                 goto free_cs_chunk_array;
1586         }
1587
1588         if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
1589                 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
1590                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1591                         atomic64_inc(&cntr->validation_drop_cnt);
1592                         dev_err(hdev->dev,
1593                                 "Queue index %d is invalid\n", q_idx);
1594                         rc = -EINVAL;
1595                         goto free_cs_chunk_array;
1596                 }
1597
1598                 collective_engine_id = chunk->collective_engine_id;
1599         }
1600
1601         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
1602                 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
1603                 if (rc)
1604                         goto free_cs_chunk_array;
1605
1606                 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
1607                 if (IS_ERR(sig_fence)) {
1608                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1609                         atomic64_inc(&cntr->validation_drop_cnt);
1610                         dev_err(hdev->dev,
1611                                 "Failed to get signal CS with seq 0x%llx\n",
1612                                 signal_seq);
1613                         rc = PTR_ERR(sig_fence);
1614                         goto free_cs_chunk_array;
1615                 }
1616
1617                 if (!sig_fence) {
1618                         /* signal CS already finished */
1619                         rc = 0;
1620                         goto free_cs_chunk_array;
1621                 }
1622
1623                 sig_waitcs_cmpl =
1624                         container_of(sig_fence, struct hl_cs_compl, base_fence);
1625
1626                 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
1627                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1628                         atomic64_inc(&cntr->validation_drop_cnt);
1629                         dev_err(hdev->dev,
1630                                 "CS seq 0x%llx is not of a signal CS\n",
1631                                 signal_seq);
1632                         hl_fence_put(sig_fence);
1633                         rc = -EINVAL;
1634                         goto free_cs_chunk_array;
1635                 }
1636
1637                 if (completion_done(&sig_fence->completion)) {
1638                         /* signal CS already finished */
1639                         hl_fence_put(sig_fence);
1640                         rc = 0;
1641                         goto free_cs_chunk_array;
1642                 }
1643         }
1644
1645         rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs);
1646         if (rc) {
1647                 if (cs_type == CS_TYPE_WAIT ||
1648                         cs_type == CS_TYPE_COLLECTIVE_WAIT)
1649                         hl_fence_put(sig_fence);
1650                 goto free_cs_chunk_array;
1651         }
1652
1653         cs->timestamp = !!timestamp;
1654
1655         /*
1656          * Save the signal CS fence for later initialization right before
1657          * hanging the wait CS on the queue.
1658          */
1659         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
1660                 cs->signal_fence = sig_fence;
1661
1662         hl_debugfs_add_cs(cs);
1663
1664         *cs_seq = cs->sequence;
1665
1666         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
1667                 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
1668                                 q_idx);
1669         else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
1670                 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
1671                                 cs, q_idx, collective_engine_id);
1672         else {
1673                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1674                 atomic64_inc(&cntr->validation_drop_cnt);
1675                 rc = -EINVAL;
1676         }
1677
1678         if (rc)
1679                 goto free_cs_object;
1680
1681         rc = hl_hw_queue_schedule_cs(cs);
1682         if (rc) {
1683                 if (rc != -EAGAIN)
1684                         dev_err(hdev->dev,
1685                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1686                                 ctx->asid, cs->sequence, rc);
1687                 goto free_cs_object;
1688         }
1689
1690         rc = HL_CS_STATUS_SUCCESS;
1691         goto put_cs;
1692
1693 free_cs_object:
1694         cs_rollback(hdev, cs);
1695         *cs_seq = ULLONG_MAX;
1696         /* The path below is both for good and erroneous exits */
1697 put_cs:
1698         /* We finished with the CS in this function, so put the ref */
1699         cs_put(cs);
1700 free_cs_chunk_array:
1701         kfree(cs_chunk_array);
1702 out:
1703         return rc;
1704 }
1705
1706 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
1707 {
1708         union hl_cs_args *args = data;
1709         enum hl_cs_type cs_type;
1710         u64 cs_seq = ULONG_MAX;
1711         void __user *chunks;
1712         u32 num_chunks, flags;
1713         int rc;
1714
1715         rc = hl_cs_sanity_checks(hpriv, args);
1716         if (rc)
1717                 goto out;
1718
1719         rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
1720         if (rc)
1721                 goto out;
1722
1723         rc = hl_submit_pending_cb(hpriv);
1724         if (rc)
1725                 goto out;
1726
1727         cs_type = hl_cs_get_cs_type(args->in.cs_flags &
1728                                         ~HL_CS_FLAGS_FORCE_RESTORE);
1729         chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
1730         num_chunks = args->in.num_chunks_execute;
1731         flags = args->in.cs_flags;
1732
1733         /* In case this is a staged CS, user should supply the CS sequence */
1734         if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1735                         !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1736                 cs_seq = args->in.seq;
1737
1738         switch (cs_type) {
1739         case CS_TYPE_SIGNAL:
1740         case CS_TYPE_WAIT:
1741         case CS_TYPE_COLLECTIVE_WAIT:
1742                 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
1743                         &cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
1744                 break;
1745         default:
1746                 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
1747                                                         args->in.cs_flags);
1748                 break;
1749         }
1750
1751 out:
1752         if (rc != -EAGAIN) {
1753                 memset(args, 0, sizeof(*args));
1754                 args->out.status = rc;
1755                 args->out.seq = cs_seq;
1756         }
1757
1758         return rc;
1759 }
1760
/**
 * _hl_cs_wait_ioctl() - wait for the fence of a given CS sequence number.
 * @hdev: habanalabs device structure.
 * @ctx: context the CS sequence belongs to.
 * @timeout_us: timeout in microseconds, or MAX_SCHEDULE_TIMEOUT (passed
 *              through as-is) to wait without bound. A value of 0 polls
 *              the completion state without sleeping.
 * @seq: sequence number of the CS to wait on.
 * @status: out - CS_WAIT_STATUS_{BUSY,COMPLETED,GONE}.
 * @timestamp: optional out - completion timestamp in ns; set to 0 unless
 *             the CS completed within the wait.
 *
 * Return: 0 on success, the PTR_ERR of the fence lookup (e.g. -EINVAL for
 * a sequence beyond the current one), or -ETIMEDOUT/-EIO propagated from
 * the fence's recorded error.
 *
 * NOTE(review): a wait interrupted by a signal makes completion_rc
 * negative, which falls into the CS_WAIT_STATUS_BUSY branch while rc
 * stays 0 - the -ERESTARTSYS value itself is not propagated from here.
 */
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
                                u64 timeout_us, u64 seq,
                                enum hl_cs_wait_status *status, s64 *timestamp)
{
        struct hl_fence *fence;
        unsigned long timeout;
        int rc = 0;
        long completion_rc;

        if (timestamp)
                *timestamp = 0;

        /* MAX_SCHEDULE_TIMEOUT is already in jiffies semantics - don't convert */
        if (timeout_us == MAX_SCHEDULE_TIMEOUT)
                timeout = timeout_us;
        else
                timeout = usecs_to_jiffies(timeout_us);

        /* Keep the context alive for the whole wait */
        hl_ctx_get(hdev, ctx);

        fence = hl_ctx_get_fence(ctx, seq);
        if (IS_ERR(fence)) {
                rc = PTR_ERR(fence);
                if (rc == -EINVAL)
                        dev_notice_ratelimited(hdev->dev,
                                "Can't wait on CS %llu because current CS is at seq %llu\n",
                                seq, ctx->cs_sequence);
        } else if (fence) {
                /* timeout_us == 0 means poll-only: no sleep, just check state */
                if (!timeout_us)
                        completion_rc = completion_done(&fence->completion);
                else
                        completion_rc =
                                wait_for_completion_interruptible_timeout(
                                        &fence->completion, timeout);

                if (completion_rc > 0) {
                        *status = CS_WAIT_STATUS_COMPLETED;
                        if (timestamp)
                                *timestamp = ktime_to_ns(fence->timestamp);
                } else {
                        *status = CS_WAIT_STATUS_BUSY;
                }

                /* Surface a fence-level error even if the wait itself succeeded */
                if (fence->error == -ETIMEDOUT)
                        rc = -ETIMEDOUT;
                else if (fence->error == -EIO)
                        rc = -EIO;

                /* Drop the reference taken by hl_ctx_get_fence() */
                hl_fence_put(fence);
        } else {
                /* NULL fence: the CS completed long ago and its fence was freed */
                dev_dbg(hdev->dev,
                        "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
                        seq, ctx->cs_sequence);
                *status = CS_WAIT_STATUS_GONE;
        }

        hl_ctx_put(ctx);

        return rc;
}
1820
1821 int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
1822 {
1823         struct hl_device *hdev = hpriv->hdev;
1824         union hl_wait_cs_args *args = data;
1825         enum hl_cs_wait_status status;
1826         u64 seq = args->in.seq;
1827         s64 timestamp;
1828         int rc;
1829
1830         rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
1831                                 &status, &timestamp);
1832
1833         memset(args, 0, sizeof(*args));
1834
1835         if (rc) {
1836                 if (rc == -ERESTARTSYS) {
1837                         dev_err_ratelimited(hdev->dev,
1838                                 "user process got signal while waiting for CS handle %llu\n",
1839                                 seq);
1840                         args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
1841                         rc = -EINTR;
1842                 } else if (rc == -ETIMEDOUT) {
1843                         dev_err_ratelimited(hdev->dev,
1844                                 "CS %llu has timed-out while user process is waiting for it\n",
1845                                 seq);
1846                         args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
1847                 } else if (rc == -EIO) {
1848                         dev_err_ratelimited(hdev->dev,
1849                                 "CS %llu has been aborted while user process is waiting for it\n",
1850                                 seq);
1851                         args->out.status = HL_WAIT_CS_STATUS_ABORTED;
1852                 }
1853                 return rc;
1854         }
1855
1856         if (timestamp) {
1857                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
1858                 args->out.timestamp_nsec = timestamp;
1859         }
1860
1861         switch (status) {
1862         case CS_WAIT_STATUS_GONE:
1863                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
1864                 fallthrough;
1865         case CS_WAIT_STATUS_COMPLETED:
1866                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
1867                 break;
1868         case CS_WAIT_STATUS_BUSY:
1869         default:
1870                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
1871                 break;
1872         }
1873
1874         return 0;
1875 }