drivers/accel/habanalabs/common/command_submission.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2021 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/drm/habanalabs_accel.h>
9 #include "habanalabs.h"
10
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13
14 #define HL_CS_FLAGS_TYPE_MASK   (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15                         HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
16                         HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
17                         HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
18
19
20 #define MAX_TS_ITER_NUM 10
21
22 /**
23  * enum hl_cs_wait_status - cs wait status
24  * @CS_WAIT_STATUS_BUSY: cs was not completed yet
25  * @CS_WAIT_STATUS_COMPLETED: cs completed
26  * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
27  */
28 enum hl_cs_wait_status {
29         CS_WAIT_STATUS_BUSY,
30         CS_WAIT_STATUS_COMPLETED,
31         CS_WAIT_STATUS_GONE
32 };
33
34 static void job_wq_completion(struct work_struct *work);
35 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
36                                 enum hl_cs_wait_status *status, s64 *timestamp);
37 static void cs_do_release(struct kref *ref);
38
39 static void hl_push_cs_outcome(struct hl_device *hdev,
40                                struct hl_cs_outcome_store *outcome_store,
41                                u64 seq, ktime_t ts, int error)
42 {
43         struct hl_cs_outcome *node;
44         unsigned long flags;
45
46         /*
47          * CS outcome store supports the following operations:
48          * push outcome - store a recent CS outcome in the store
49          * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
50          * It uses 2 lists: used list and free list.
51          * It has a pre-allocated amount of nodes, each node stores
52          * a single CS outcome.
53          * Initially, all the nodes are in the free list.
54          * On push outcome, a node (any) is taken from the free list, its
55          * information is filled in, and the node is moved to the used list.
56          * It is possible that there are no nodes left in the free list.
57          * In this case, we will lose some information about old outcomes. We
58          * will pop the OLDEST node from the used list, and make it free.
59          * On pop, the node is searched for in the used list (using a search
60          * index).
61          * If found, the node is then removed from the used list, and moved
62          * back to the free list. The outcome data that the node contained is
63          * returned back to the user.
64          */
65
66         spin_lock_irqsave(&outcome_store->db_lock, flags);
67
68         if (list_empty(&outcome_store->free_list)) {
69                 node = list_last_entry(&outcome_store->used_list,
70                                        struct hl_cs_outcome, list_link);
71                 hash_del(&node->map_link);
72                 dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
73         } else {
74                 node = list_last_entry(&outcome_store->free_list,
75                                        struct hl_cs_outcome, list_link);
76         }
77
78         list_del_init(&node->list_link);
79
80         node->seq = seq;
81         node->ts = ts;
82         node->error = error;
83
84         list_add(&node->list_link, &outcome_store->used_list);
85         hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
86
87         spin_unlock_irqrestore(&outcome_store->db_lock, flags);
88 }
89
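/*
 * hl_pop_cs_outcome - retrieve a specific CS outcome (by sequence) from the
 * store. If the outcome is found, its timestamp and error are copied to the
 * caller, the node is returned to the free list and true is returned.
 * Otherwise false is returned.
 */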
90 static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
91                                u64 seq, ktime_t *ts, int *error)
92 {
93         struct hl_cs_outcome *node;
94         unsigned long flags;
95
96         spin_lock_irqsave(&outcome_store->db_lock, flags);
97
98         hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
99                 if (node->seq == seq) {
100                         *ts = node->ts;
101                         *error = node->error;
102
103                         hash_del(&node->map_link);
104                         list_del_init(&node->list_link);
105                         list_add(&node->list_link, &outcome_store->free_list);
106
107                         spin_unlock_irqrestore(&outcome_store->db_lock, flags);
108
109                         return true;
110                 }
111
112         spin_unlock_irqrestore(&outcome_store->db_lock, flags);
113
114         return false;
115 }
116
117 static void hl_sob_reset(struct kref *ref)
118 {
119         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
120                                                         kref);
121         struct hl_device *hdev = hw_sob->hdev;
122
123         dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
124
125         hdev->asic_funcs->reset_sob(hdev, hw_sob);
126
127         hw_sob->need_reset = false;
128 }
129
130 void hl_sob_reset_error(struct kref *ref)
131 {
132         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
133                                                         kref);
134         struct hl_device *hdev = hw_sob->hdev;
135
136         dev_crit(hdev->dev,
137                 "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
138                 hw_sob->q_idx, hw_sob->sob_id);
139 }
140
141 void hw_sob_put(struct hl_hw_sob *hw_sob)
142 {
143         if (hw_sob)
144                 kref_put(&hw_sob->kref, hl_sob_reset);
145 }
146
147 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
148 {
149         if (hw_sob)
150                 kref_put(&hw_sob->kref, hl_sob_reset_error);
151 }
152
153 void hw_sob_get(struct hl_hw_sob *hw_sob)
154 {
155         if (hw_sob)
156                 kref_get(&hw_sob->kref);
157 }
158
159 /**
160  * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
161  * @sob_base: sob base id
162  * @sob_mask: sob user mask, each bit represents a sob offset from sob base
163  * @mask: generated mask
164  *
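 * For example, with @sob_base = 10 (offset 2 within the monitor's 8 SOBs) and
 * @sob_mask = 0x1, the function stores ~BIT(2), i.e. 0xfb, in @mask.
 *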
165  * Return: 0 if the given parameters are valid, -EINVAL otherwise
166  */
167 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
168 {
169         int i;
170
171         if (sob_mask == 0)
172                 return -EINVAL;
173
174         if (sob_mask == 0x1) {
175                 *mask = ~(1 << (sob_base & 0x7));
176         } else {
177                 /* find msb in order to verify sob range is valid */
178                 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
179                         if (BIT(i) & sob_mask)
180                                 break;
181
182                 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
183                         return -EINVAL;
184
185                 *mask = ~sob_mask;
186         }
187
188         return 0;
189 }
190
191 static void hl_fence_release(struct kref *kref)
192 {
193         struct hl_fence *fence =
194                 container_of(kref, struct hl_fence, refcount);
195         struct hl_cs_compl *hl_cs_cmpl =
196                 container_of(fence, struct hl_cs_compl, base_fence);
197
198         kfree(hl_cs_cmpl);
199 }
200
201 void hl_fence_put(struct hl_fence *fence)
202 {
203         if (IS_ERR_OR_NULL(fence))
204                 return;
205         kref_put(&fence->refcount, hl_fence_release);
206 }
207
208 void hl_fences_put(struct hl_fence **fence, int len)
209 {
210         int i;
211
212         for (i = 0; i < len; i++, fence++)
213                 hl_fence_put(*fence);
214 }
215
216 void hl_fence_get(struct hl_fence *fence)
217 {
218         if (fence)
219                 kref_get(&fence->refcount);
220 }
221
222 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
223 {
224         kref_init(&fence->refcount);
225         fence->cs_sequence = sequence;
226         fence->error = 0;
227         fence->timestamp = ktime_set(0, 0);
228         fence->mcs_handling_done = false;
229         init_completion(&fence->completion);
230 }
231
232 void cs_get(struct hl_cs *cs)
233 {
234         kref_get(&cs->refcount);
235 }
236
237 static int cs_get_unless_zero(struct hl_cs *cs)
238 {
239         return kref_get_unless_zero(&cs->refcount);
240 }
241
242 static void cs_put(struct hl_cs *cs)
243 {
244         kref_put(&cs->refcount, cs_do_release);
245 }
246
247 static void cs_job_do_release(struct kref *ref)
248 {
249         struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
250
251         kfree(job);
252 }
253
254 static void hl_cs_job_put(struct hl_cs_job *job)
255 {
256         kref_put(&job->refcount, cs_job_do_release);
257 }
258
259 bool cs_needs_completion(struct hl_cs *cs)
260 {
261         /* In case this is a staged CS, only the last CS in the sequence should
262          * get a completion; any non-staged CS will always get a completion
263          */
264         if (cs->staged_cs && !cs->staged_last)
265                 return false;
266
267         return true;
268 }
269
270 bool cs_needs_timeout(struct hl_cs *cs)
271 {
272         /* In case this is a staged CS, only the first CS in the sequence should
273          * get a timeout; any non-staged CS will always get a timeout
274          */
275         if (cs->staged_cs && !cs->staged_first)
276                 return false;
277
278         return true;
279 }
280
281 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
282 {
283         /*
284          * Patched CB is created for external queues jobs, and for H/W queues
285          * jobs if the user CB was allocated by driver and MMU is disabled.
286          */
287         return (job->queue_type == QUEUE_TYPE_EXT ||
288                         (job->queue_type == QUEUE_TYPE_HW &&
289                                         job->is_kernel_allocated_cb &&
290                                         !hdev->mmu_enable));
291 }
292
293 /*
294  * cs_parser - parse the user command submission
295  *
296  * @hpriv: pointer to the private data of the fd
297  * @job: pointer to the job that holds the command submission info
298  *
299  * The function parses the command submission of the user. It calls the
300  * ASIC specific parser, which returns a list of memory blocks to send
301  * to the device as different command buffers
302  *
303  */
304 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
305 {
306         struct hl_device *hdev = hpriv->hdev;
307         struct hl_cs_parser parser;
308         int rc;
309
310         parser.ctx_id = job->cs->ctx->asid;
311         parser.cs_sequence = job->cs->sequence;
312         parser.job_id = job->id;
313
314         parser.hw_queue_id = job->hw_queue_id;
315         parser.job_userptr_list = &job->userptr_list;
316         parser.patched_cb = NULL;
317         parser.user_cb = job->user_cb;
318         parser.user_cb_size = job->user_cb_size;
319         parser.queue_type = job->queue_type;
320         parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
321         job->patched_cb = NULL;
322         parser.completion = cs_needs_completion(job->cs);
323
324         rc = hdev->asic_funcs->cs_parser(hdev, &parser);
325
326         if (is_cb_patched(hdev, job)) {
327                 if (!rc) {
328                         job->patched_cb = parser.patched_cb;
329                         job->job_cb_size = parser.patched_cb_size;
330                         job->contains_dma_pkt = parser.contains_dma_pkt;
331                         atomic_inc(&job->patched_cb->cs_cnt);
332                 }
333
334                 /*
335                  * Whether the parsing worked or not, we don't need the
336                  * original CB anymore because it was already parsed and
337                  * won't be accessed again for this CS
338                  */
339                 atomic_dec(&job->user_cb->cs_cnt);
340                 hl_cb_put(job->user_cb);
341                 job->user_cb = NULL;
342         } else if (!rc) {
343                 job->job_cb_size = job->user_cb_size;
344         }
345
346         return rc;
347 }
348
349 static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
350 {
351         struct hl_cs *cs = job->cs;
352
353         if (is_cb_patched(hdev, job)) {
354                 hl_userptr_delete_list(hdev, &job->userptr_list);
355
356                 /*
357                  * We might arrive here from rollback and patched CB wasn't
358                  * created, so we need to check it's not NULL
359                  */
360                 if (job->patched_cb) {
361                         atomic_dec(&job->patched_cb->cs_cnt);
362                         hl_cb_put(job->patched_cb);
363                 }
364         }
365
366         /* For H/W queue jobs, if a user CB was allocated by driver and MMU is
367          * enabled, the user CB isn't released in cs_parser() and thus should be
368          * released here. This is also true for INT queues jobs which were
369          * allocated by driver.
370          */
371         if ((job->is_kernel_allocated_cb &&
372                 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
373                                 job->queue_type == QUEUE_TYPE_INT))) {
374                 atomic_dec(&job->user_cb->cs_cnt);
375                 hl_cb_put(job->user_cb);
376         }
377
378         /*
379          * This is the only place where there can be multiple threads
380          * modifying the list at the same time
381          */
382         spin_lock(&cs->job_lock);
383         list_del(&job->cs_node);
384         spin_unlock(&cs->job_lock);
385
386         hl_debugfs_remove_job(hdev, job);
387
388         /* We decrement reference only for a CS that gets completion
389          * because the reference was incremented only for this kind of CS
390          * right before it was scheduled.
391          *
392          * In staged submission, only the last CS marked as 'staged_last'
393          * gets completion, hence its release function will be called from here.
394          * As for all the rest CS's in the staged submission which do not get
395          * completion, their CS reference will be decremented by the
396          * 'staged_last' CS during the CS release flow.
397          * All relevant PQ CI counters will be incremented during the CS release
398          * flow by calling 'hl_hw_queue_update_ci'.
399          */
400         if (cs_needs_completion(cs) &&
401                         (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
402
403                 /* In CS based completions, the timestamp is already available,
404                  * so no need to extract it from job
405                  */
406                 if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
407                         cs->completion_timestamp = job->timestamp;
408
409                 cs_put(cs);
410         }
411
412         hl_cs_job_put(job);
413 }
414
415 /*
416  * hl_staged_cs_find_first - locate the first CS in this staged submission
417  *
418  * @hdev: pointer to device structure
419  * @cs_seq: staged submission sequence number
420  *
421  * @note: This function must be called under 'hdev->cs_mirror_lock'
422  *
423  * Find and return a CS pointer with the given sequence
424  */
425 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
426 {
427         struct hl_cs *cs;
428
429         list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
430                 if (cs->staged_cs && cs->staged_first &&
431                                 cs->sequence == cs_seq)
432                         return cs;
433
434         return NULL;
435 }
436
437 /*
438  * is_staged_cs_last_exists - returns true if the last CS in sequence exists
439  *
440  * @hdev: pointer to device structure
441  * @cs: staged submission member
442  *
443  */
444 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
445 {
446         struct hl_cs *last_entry;
447
448         last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
449                                                                 staged_cs_node);
450
451         if (last_entry->staged_last)
452                 return true;
453
454         return false;
455 }
456
457 /*
458  * staged_cs_get - get CS reference if this CS is a part of a staged CS
459  *
460  * @hdev: pointer to device structure
461  * @cs: current CS
462  * @cs_seq: staged submission sequence number
463  *
464  * Increment CS reference for every CS in this staged submission except for
465  * the CS which gets completion.
466  */
467 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
468 {
469         /* Only the last CS in this staged submission will get a completion.
470          * We must increment the reference for all other CS's in this
471          * staged submission.
472          * Once we get a completion we will release the whole staged submission.
473          */
474         if (!cs->staged_last)
475                 cs_get(cs);
476 }
477
478 /*
479  * staged_cs_put - put a CS in case it is part of staged submission
480  *
481  * @hdev: pointer to device structure
482  * @cs: CS to put
483  *
484  * This function decrements a CS reference (for a non completion CS)
485  */
486 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
487 {
488         /* We release all CS's in a staged submission except the last
489          * CS which we have never incremented its reference.
490          */
491         if (!cs_needs_completion(cs))
492                 cs_put(cs);
493 }
494
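/*
 * cs_handle_tdr - cancel the delayed TDR work of a completed CS (unless the CS
 * itself timed out, or timeouts are disabled) and arm the TDR work for the
 * next CS in the mirror list that requires a timeout.
 */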
495 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
496 {
497         struct hl_cs *next = NULL, *iter, *first_cs;
498
499         if (!cs_needs_timeout(cs))
500                 return;
501
502         spin_lock(&hdev->cs_mirror_lock);
503
504         /* We need to handle tdr only once for the complete staged submission.
505          * Hence, we choose the CS that reaches this function first which is
506          * the CS marked as 'staged_last'.
507          * In case a single staged cs was submitted which has both first and last
508          * indications, then "hl_staged_cs_find_first" below will return NULL, since
509          * we removed the cs node from the list before getting here;
510          * in such a case just continue with the cs to cancel its TDR work.
511          */
512         if (cs->staged_cs && cs->staged_last) {
513                 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
514                 if (first_cs)
515                         cs = first_cs;
516         }
517
518         spin_unlock(&hdev->cs_mirror_lock);
519
520         /* Don't cancel TDR in case this CS has timed out, because we might be
521          * running from the TDR context
522          */
523         if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
524                 return;
525
526         if (cs->tdr_active)
527                 cancel_delayed_work_sync(&cs->work_tdr);
528
529         spin_lock(&hdev->cs_mirror_lock);
530
531         /* queue TDR for next CS */
532         list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
533                 if (cs_needs_timeout(iter)) {
534                         next = iter;
535                         break;
536                 }
537
538         if (next && !next->tdr_active) {
539                 next->tdr_active = true;
540                 schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
541         }
542
543         spin_unlock(&hdev->cs_mirror_lock);
544 }
545
546 /*
547  * force_complete_multi_cs - complete all contexts that wait on multi-CS
548  *
549  * @hdev: pointer to habanalabs device structure
550  */
551 static void force_complete_multi_cs(struct hl_device *hdev)
552 {
553         int i;
554
555         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
556                 struct multi_cs_completion *mcs_compl;
557
558                 mcs_compl = &hdev->multi_cs_completion[i];
559
560                 spin_lock(&mcs_compl->lock);
561
562                 if (!mcs_compl->used) {
563                         spin_unlock(&mcs_compl->lock);
564                         continue;
565                 }
566
567                 /* When calling force complete, no context should be waiting on
568                  * multi-CS.
569                  * We call this function as a protection for such a case,
570                  * to free any pending context and print an error message
571                  */
572                 dev_err(hdev->dev,
573                                 "multi-CS completion context %d still waiting when calling force completion\n",
574                                 i);
575                 complete_all(&mcs_compl->completion);
576                 spin_unlock(&mcs_compl->lock);
577         }
578 }
579
580 /*
581  * complete_multi_cs - complete all waiting entities on multi-CS
582  *
583  * @hdev: pointer to habanalabs device structure
584  * @cs: CS structure
585  * The function signals a waiting entity that has overlapping stream masters
586  * with the completed CS.
587  * For example:
588  * - a completed CS worked on stream master QID 4, multi CS completion
589  *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
590  *   common stream master QID
591  * - a completed CS worked on stream master QID 4, multi CS completion
592  *   is actively waiting on stream master QIDs 3, 4. send signal as stream
593  *   master QID 4 is common
594  */
595 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
596 {
597         struct hl_fence *fence = cs->fence;
598         int i;
599
600         /* in case of multi CS check for completion only for the first CS */
601         if (cs->staged_cs && !cs->staged_first)
602                 return;
603
604         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
605                 struct multi_cs_completion *mcs_compl;
606
607                 mcs_compl = &hdev->multi_cs_completion[i];
608                 if (!mcs_compl->used)
609                         continue;
610
611                 spin_lock(&mcs_compl->lock);
612
613                 /*
614                  * complete if:
615                  * 1. still waiting for completion
616                  * 2. the completed CS has at least one overlapping stream
617                  *    master with the stream masters in the completion
618                  */
619                 if (mcs_compl->used &&
620                                 (fence->stream_master_qid_map &
621                                         mcs_compl->stream_master_qid_map)) {
622                         /* extract the timestamp only of first completed CS */
623                         if (!mcs_compl->timestamp)
624                                 mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
625
626                         complete_all(&mcs_compl->completion);
627
628                         /*
629                          * Setting mcs_handling_done inside the lock ensures
630                          * at least one fence has mcs_handling_done set to
631                          * true before the wait for mcs finishes. This ensures at
632                          * least one CS will be set as completed when polling
633                          * mcs fences.
634                          */
635                         fence->mcs_handling_done = true;
636                 }
637
638                 spin_unlock(&mcs_compl->lock);
639         }
640         /* In case CS completed without mcs completion initialized */
641         fence->mcs_handling_done = true;
642 }
643
644 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
645                                         struct hl_cs *cs,
646                                         struct hl_cs_compl *hl_cs_cmpl)
647 {
648         /* Skip this handler if the cs wasn't submitted, to avoid putting
649          * the hw_sob twice, since this case was already handled at this point;
650          * also skip if the hw_sob pointer wasn't set.
651          */
652         if (!hl_cs_cmpl->hw_sob || !cs->submitted)
653                 return;
654
655         spin_lock(&hl_cs_cmpl->lock);
656
657         /*
658          * We get a refcount upon reservation of signals or signal/wait cs for the
659          * hw_sob object, and need to put it when the first staged cs
660          * (which contains the encaps signals) or cs signal/wait is completed.
661          */
662         if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
663                         (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
664                         (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
665                         (!!hl_cs_cmpl->encaps_signals)) {
666                 dev_dbg(hdev->dev,
667                                 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
668                                 hl_cs_cmpl->cs_seq,
669                                 hl_cs_cmpl->type,
670                                 hl_cs_cmpl->hw_sob->sob_id,
671                                 hl_cs_cmpl->sob_val);
672
673                 hw_sob_put(hl_cs_cmpl->hw_sob);
674
675                 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
676                         hdev->asic_funcs->reset_sob_group(hdev,
677                                         hl_cs_cmpl->sob_group);
678         }
679
680         spin_unlock(&hl_cs_cmpl->lock);
681 }
682
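/*
 * cs_do_release - CS refcount release callback. Completes any remaining jobs,
 * updates the queue CI counters, removes the CS from the mirror list, handles
 * TDR and staged-submission bookkeeping, updates the fence error and
 * timestamp, signals all waiters and finally frees the CS object.
 */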
683 static void cs_do_release(struct kref *ref)
684 {
685         struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
686         struct hl_device *hdev = cs->ctx->hdev;
687         struct hl_cs_job *job, *tmp;
688         struct hl_cs_compl *hl_cs_cmpl =
689                         container_of(cs->fence, struct hl_cs_compl, base_fence);
690
691         cs->completed = true;
692
693         /*
694          * Although reaching here means that all external jobs have
695          * finished (because each one of them took a refcnt on the CS), we still
696          * need to go over the internal jobs and complete them. Otherwise, we
697          * will have leaked memory and what's worse, the CS object (and
698          * potentially the CTX object) could be released, while the JOB
699          * still holds a pointer to them (but no reference).
700          */
701         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
702                 hl_complete_job(hdev, job);
703
704         if (!cs->submitted) {
705                 /*
706                  * In case the wait for signal CS was submitted, the fence put
707                  * occurs in init_signal_wait_cs() or collective_wait_init_cs()
708                  * right before hanging on the PQ.
709                  */
710                 if (cs->type == CS_TYPE_WAIT ||
711                                 cs->type == CS_TYPE_COLLECTIVE_WAIT)
712                         hl_fence_put(cs->signal_fence);
713
714                 goto out;
715         }
716
717         /* Need to update CI for all queue jobs that do not get completion */
718         hl_hw_queue_update_ci(cs);
719
720         /* remove CS from CS mirror list */
721         spin_lock(&hdev->cs_mirror_lock);
722         list_del_init(&cs->mirror_node);
723         spin_unlock(&hdev->cs_mirror_lock);
724
725         cs_handle_tdr(hdev, cs);
726
727         if (cs->staged_cs) {
728                 /* the completion CS decrements reference for the entire
729                  * staged submission
730                  */
731                 if (cs->staged_last) {
732                         struct hl_cs *staged_cs, *tmp_cs;
733
734                         list_for_each_entry_safe(staged_cs, tmp_cs,
735                                         &cs->staged_cs_node, staged_cs_node)
736                                 staged_cs_put(hdev, staged_cs);
737                 }
738
739                 /* A staged CS will be a member in the list only after it
740                  * was submitted. We used 'cs_mirror_lock' when inserting
741                  * it into the list, so we use it again when removing it
742                  */
743                 if (cs->submitted) {
744                         spin_lock(&hdev->cs_mirror_lock);
745                         list_del(&cs->staged_cs_node);
746                         spin_unlock(&hdev->cs_mirror_lock);
747                 }
748
749                 /* decrement refcount to handle when first staged cs
750                  * with encaps signals is completed.
751                  */
752                 if (hl_cs_cmpl->encaps_signals)
753                         kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
754                                         hl_encaps_release_handle_and_put_ctx);
755         }
756
757         if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
758                 kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
759
760 out:
761         /* Must be called before hl_ctx_put because inside we use ctx to get
762          * the device
763          */
764         hl_debugfs_remove_cs(cs);
765
766         hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
767
768         /* We need to mark an error for not submitted because in that case
769          * the hl fence release flow is different. Mainly, we don't need
770          * to handle hw_sob for signal/wait
771          */
772         if (cs->timedout)
773                 cs->fence->error = -ETIMEDOUT;
774         else if (cs->aborted)
775                 cs->fence->error = -EIO;
776         else if (!cs->submitted)
777                 cs->fence->error = -EBUSY;
778
779         if (unlikely(cs->skip_reset_on_timeout)) {
780                 dev_err(hdev->dev,
781                         "Command submission %llu completed after %llu (s)\n",
782                         cs->sequence,
783                         div_u64(jiffies - cs->submission_time_jiffies, HZ));
784         }
785
786         if (cs->timestamp) {
787                 cs->fence->timestamp = cs->completion_timestamp;
788                 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
789                                    cs->fence->timestamp, cs->fence->error);
790         }
791
792         hl_ctx_put(cs->ctx);
793
794         complete_all(&cs->fence->completion);
795         complete_multi_cs(hdev, cs);
796
797         cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
798
799         hl_fence_put(cs->fence);
800
801         kfree(cs->jobs_in_queue_cnt);
802         kfree(cs);
803 }
804
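/*
 * cs_timedout - TDR work function, invoked when a CS has not completed within
 * its timeout. It records the first CS timeout parameters, prints an error
 * according to the CS type, dumps the device state and, if configured,
 * triggers a device reset.
 */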
805 static void cs_timedout(struct work_struct *work)
806 {
807         struct hl_device *hdev;
808         u64 event_mask = 0x0;
809         int rc;
810         struct hl_cs *cs = container_of(work, struct hl_cs,
811                                                  work_tdr.work);
812         bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false;
813
814         rc = cs_get_unless_zero(cs);
815         if (!rc)
816                 return;
817
818         if ((!cs->submitted) || (cs->completed)) {
819                 cs_put(cs);
820                 return;
821         }
822
823         hdev = cs->ctx->hdev;
824
825         if (likely(!skip_reset_on_timeout)) {
826                 if (hdev->reset_on_lockup)
827                         device_reset = true;
828                 else
829                         hdev->reset_info.needs_reset = true;
830
831                 /* Mark the CS as timed out so we won't try to cancel its TDR */
832                 cs->timedout = true;
833         }
834
835         /* Save only the first CS timeout parameters */
836         rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
837         if (rc) {
838                 hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
839                 hdev->captured_err_info.cs_timeout.seq = cs->sequence;
840                 event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
841         }
842
843         switch (cs->type) {
844         case CS_TYPE_SIGNAL:
845                 dev_err(hdev->dev,
846                         "Signal command submission %llu has not finished in time!\n",
847                         cs->sequence);
848                 break;
849
850         case CS_TYPE_WAIT:
851                 dev_err(hdev->dev,
852                         "Wait command submission %llu has not finished in time!\n",
853                         cs->sequence);
854                 break;
855
856         case CS_TYPE_COLLECTIVE_WAIT:
857                 dev_err(hdev->dev,
858                         "Collective Wait command submission %llu has not finished in time!\n",
859                         cs->sequence);
860                 break;
861
862         default:
863                 dev_err(hdev->dev,
864                         "Command submission %llu has not finished in time!\n",
865                         cs->sequence);
866                 break;
867         }
868
869         rc = hl_state_dump(hdev);
870         if (rc)
871                 dev_err(hdev->dev, "Error during system state dump %d\n", rc);
872
873         cs_put(cs);
874
875         if (device_reset) {
876                 event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
877                 hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
878         } else if (event_mask) {
879                 hl_notifier_event_send_all(hdev, event_mask);
880         }
881 }
882
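/*
 * allocate_cs - allocate and initialize a new CS object together with its
 * completion fence, assign it the next sequence number of the context and
 * store the fence in the context's cs_pending array. Returns -EAGAIN if the
 * relevant cs_pending slot is still occupied by an uncompleted CS.
 */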
883 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
884                         enum hl_cs_type cs_type, u64 user_sequence,
885                         struct hl_cs **cs_new, u32 flags, u32 timeout)
886 {
887         struct hl_cs_counters_atomic *cntr;
888         struct hl_fence *other = NULL;
889         struct hl_cs_compl *cs_cmpl;
890         struct hl_cs *cs;
891         int rc;
892
893         cntr = &hdev->aggregated_cs_counters;
894
895         cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
896         if (!cs)
897                 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
898
899         if (!cs) {
900                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
901                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
902                 return -ENOMEM;
903         }
904
905         /* increment refcnt for context */
906         hl_ctx_get(ctx);
907
908         cs->ctx = ctx;
909         cs->submitted = false;
910         cs->completed = false;
911         cs->type = cs_type;
912         cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
913         cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
914         cs->timeout_jiffies = timeout;
915         cs->skip_reset_on_timeout =
916                 hdev->reset_info.skip_reset_on_timeout ||
917                 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
918         cs->submission_time_jiffies = jiffies;
919         INIT_LIST_HEAD(&cs->job_list);
920         INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
921         kref_init(&cs->refcount);
922         spin_lock_init(&cs->job_lock);
923
924         cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
925         if (!cs_cmpl)
926                 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
927
928         if (!cs_cmpl) {
929                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
930                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
931                 rc = -ENOMEM;
932                 goto free_cs;
933         }
934
935         cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
936                         sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
937         if (!cs->jobs_in_queue_cnt)
938                 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
939                                 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
940
941         if (!cs->jobs_in_queue_cnt) {
942                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
943                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
944                 rc = -ENOMEM;
945                 goto free_cs_cmpl;
946         }
947
948         cs_cmpl->hdev = hdev;
949         cs_cmpl->type = cs->type;
950         spin_lock_init(&cs_cmpl->lock);
951         cs->fence = &cs_cmpl->base_fence;
952
953         spin_lock(&ctx->cs_lock);
954
955         cs_cmpl->cs_seq = ctx->cs_sequence;
956         other = ctx->cs_pending[cs_cmpl->cs_seq &
957                                 (hdev->asic_prop.max_pending_cs - 1)];
958
959         if (other && !completion_done(&other->completion)) {
960                 /* If the following statement is true, it means we have reached
961                  * a point in which only part of the staged submission was
962                  * submitted and we don't have enough room in the 'cs_pending'
963                  * array for the rest of the submission.
964                  * This causes a deadlock because this CS will never be
965                  * completed as it depends on future CS's for completion.
966                  */
967                 if (other->cs_sequence == user_sequence)
968                         dev_crit_ratelimited(hdev->dev,
969                                 "Staged CS %llu deadlock due to lack of resources",
970                                 user_sequence);
971
972                 dev_dbg_ratelimited(hdev->dev,
973                         "Rejecting CS because of too many in-flight CS\n");
974                 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
975                 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
976                 rc = -EAGAIN;
977                 goto free_fence;
978         }
979
980         /* init hl_fence */
981         hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
982
983         cs->sequence = cs_cmpl->cs_seq;
984
985         ctx->cs_pending[cs_cmpl->cs_seq &
986                         (hdev->asic_prop.max_pending_cs - 1)] =
987                                                         &cs_cmpl->base_fence;
988         ctx->cs_sequence++;
989
990         hl_fence_get(&cs_cmpl->base_fence);
991
992         hl_fence_put(other);
993
994         spin_unlock(&ctx->cs_lock);
995
996         *cs_new = cs;
997
998         return 0;
999
1000 free_fence:
1001         spin_unlock(&ctx->cs_lock);
1002         kfree(cs->jobs_in_queue_cnt);
1003 free_cs_cmpl:
1004         kfree(cs_cmpl);
1005 free_cs:
1006         kfree(cs);
1007         hl_ctx_put(ctx);
1008         return rc;
1009 }
1010
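/*
 * cs_rollback - release a CS that will not be submitted: drop the staged
 * submission reference (if one was taken) and complete all jobs that were
 * already attached to the CS.
 */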
1011 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
1012 {
1013         struct hl_cs_job *job, *tmp;
1014
1015         staged_cs_put(hdev, cs);
1016
1017         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1018                 hl_complete_job(hdev, job);
1019 }
1020
1021 /*
1022  * release_reserved_encaps_signals() - release reserved encapsulated signals.
1023  * @hdev: pointer to habanalabs device structure
1024  *
1025  * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
1026  * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
1027  * For these signals we also need to put the refcount of the H/W SOB which was taken at
1028  * the reservation.
1029  */
1030 static void release_reserved_encaps_signals(struct hl_device *hdev)
1031 {
1032         struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
1033         struct hl_cs_encaps_sig_handle *handle;
1034         struct hl_encaps_signals_mgr *mgr;
1035         u32 id;
1036
1037         if (!ctx)
1038                 return;
1039
1040         mgr = &ctx->sig_mgr;
1041
1042         idr_for_each_entry(&mgr->handles, handle, id)
1043                 if (handle->cs_seq == ULLONG_MAX)
1044                         kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
1045
1046         hl_ctx_put(ctx);
1047 }
1048
1049 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
1050 {
1051         int i;
1052         struct hl_cs *cs, *tmp;
1053
1054         if (!skip_wq_flush) {
1055                 flush_workqueue(hdev->ts_free_obj_wq);
1056
1057                 /* flush all completions before iterating over the CS mirror list in
1058                  * order to avoid a race with the release functions
1059                  */
1060                 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1061                         flush_workqueue(hdev->cq_wq[i]);
1062
1063                 flush_workqueue(hdev->cs_cmplt_wq);
1064         }
1065
1066         /* Make sure we don't have leftovers in the CS mirror list */
1067         list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
1068                 cs_get(cs);
1069                 cs->aborted = true;
1070                 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
1071                                         cs->ctx->asid, cs->sequence);
1072                 cs_rollback(hdev, cs);
1073                 cs_put(cs);
1074         }
1075
1076         force_complete_multi_cs(hdev);
1077
1078         release_reserved_encaps_signals(hdev);
1079 }
1080
1081 static void
1082 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
1083 {
1084         struct hl_user_pending_interrupt *pend, *temp;
1085         unsigned long flags;
1086
1087         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
1088         list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
1089                 if (pend->ts_reg_info.buf) {
1090                         list_del(&pend->wait_list_node);
1091                         hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
1092                         hl_cb_put(pend->ts_reg_info.cq_cb);
1093                 } else {
1094                         pend->fence.error = -EIO;
1095                         complete_all(&pend->fence.completion);
1096                 }
1097         }
1098         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
1099 }
1100
1101 void hl_release_pending_user_interrupts(struct hl_device *hdev)
1102 {
1103         struct asic_fixed_properties *prop = &hdev->asic_prop;
1104         struct hl_user_interrupt *interrupt;
1105         int i;
1106
1107         if (!prop->user_interrupt_count)
1108                 return;
1109
1110         /* We iterate through the user interrupt requests and wake up all
1111          * user threads waiting for interrupt completion. We iterate the
1112          * list under a lock; this is why all user threads, once awake,
1113          * will wait on the same lock and will release the waiting object upon
1114          * unlock.
1115          */
1116
1117         for (i = 0 ; i < prop->user_interrupt_count ; i++) {
1118                 interrupt = &hdev->user_interrupt[i];
1119                 wake_pending_user_interrupt_threads(interrupt);
1120         }
1121
1122         interrupt = &hdev->common_user_cq_interrupt;
1123         wake_pending_user_interrupt_threads(interrupt);
1124
1125         interrupt = &hdev->common_decoder_interrupt;
1126         wake_pending_user_interrupt_threads(interrupt);
1127 }
1128
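/*
 * force_complete_cs - mark the fence of every CS in the mirror list with -EIO
 * and complete it, to release any entity waiting on these fences.
 */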
1129 static void force_complete_cs(struct hl_device *hdev)
1130 {
1131         struct hl_cs *cs;
1132
1133         spin_lock(&hdev->cs_mirror_lock);
1134
1135         list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
1136                 cs->fence->error = -EIO;
1137                 complete_all(&cs->fence->completion);
1138         }
1139
1140         spin_unlock(&hdev->cs_mirror_lock);
1141 }
1142
1143 void hl_abort_waitings_for_completion(struct hl_device *hdev)
1144 {
1145         force_complete_cs(hdev);
1146         force_complete_multi_cs(hdev);
1147         hl_release_pending_user_interrupts(hdev);
1148 }
1149
1150 static void job_wq_completion(struct work_struct *work)
1151 {
1152         struct hl_cs_job *job = container_of(work, struct hl_cs_job,
1153                                                 finish_work);
1154         struct hl_cs *cs = job->cs;
1155         struct hl_device *hdev = cs->ctx->hdev;
1156
1157         /* job is no longer needed */
1158         hl_complete_job(hdev, job);
1159 }
1160
1161 static void cs_completion(struct work_struct *work)
1162 {
1163         struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
1164         struct hl_device *hdev = cs->ctx->hdev;
1165         struct hl_cs_job *job, *tmp;
1166
1167         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1168                 hl_complete_job(hdev, job);
1169 }
1170
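/*
 * validate_queue_index - verify that the queue index of a CS chunk refers to a
 * usable queue and derive the queue type and whether the job's CB is
 * kernel-allocated, based on the queue properties and the chunk's
 * USER_ALLOC_CB flag.
 */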
1171 static int validate_queue_index(struct hl_device *hdev,
1172                                 struct hl_cs_chunk *chunk,
1173                                 enum hl_queue_type *queue_type,
1174                                 bool *is_kernel_allocated_cb)
1175 {
1176         struct asic_fixed_properties *asic = &hdev->asic_prop;
1177         struct hw_queue_properties *hw_queue_prop;
1178
1179         /* This must be checked here to prevent out-of-bounds access to
1180          * hw_queues_props array
1181          */
1182         if (chunk->queue_index >= asic->max_queues) {
1183                 dev_err(hdev->dev, "Queue index %d is invalid\n",
1184                         chunk->queue_index);
1185                 return -EINVAL;
1186         }
1187
1188         hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
1189
1190         if (hw_queue_prop->type == QUEUE_TYPE_NA) {
1191                 dev_err(hdev->dev, "Queue index %d is not applicable\n",
1192                         chunk->queue_index);
1193                 return -EINVAL;
1194         }
1195
1196         if (hw_queue_prop->binned) {
1197                 dev_err(hdev->dev, "Queue index %d is binned out\n",
1198                         chunk->queue_index);
1199                 return -EINVAL;
1200         }
1201
1202         if (hw_queue_prop->driver_only) {
1203                 dev_err(hdev->dev,
1204                         "Queue index %d is restricted for the kernel driver\n",
1205                         chunk->queue_index);
1206                 return -EINVAL;
1207         }
1208
1209         /* When hw queue type isn't QUEUE_TYPE_HW,
1210          * the USER_ALLOC_CB flag shall be treated as "don't care".
1211          */
1212         if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1213                 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1214                         if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1215                                 dev_err(hdev->dev,
1216                                         "Queue index %d doesn't support user CB\n",
1217                                         chunk->queue_index);
1218                                 return -EINVAL;
1219                         }
1220
1221                         *is_kernel_allocated_cb = false;
1222                 } else {
1223                         if (!(hw_queue_prop->cb_alloc_flags &
1224                                         CB_ALLOC_KERNEL)) {
1225                                 dev_err(hdev->dev,
1226                                         "Queue index %d doesn't support kernel CB\n",
1227                                         chunk->queue_index);
1228                                 return -EINVAL;
1229                         }
1230
1231                         *is_kernel_allocated_cb = true;
1232                 }
1233         } else {
1234                 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1235                                                 & CB_ALLOC_KERNEL);
1236         }
1237
1238         *queue_type = hw_queue_prop->type;
1239         return 0;
1240 }
1241
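/*
 * get_cb_from_cs_chunk - look up the CB referenced by a CS chunk, validate the
 * chunk's CB size against the actual CB size and take a CS reference on the
 * CB. Returns NULL on any failure.
 */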
1242 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1243                                         struct hl_mem_mgr *mmg,
1244                                         struct hl_cs_chunk *chunk)
1245 {
1246         struct hl_cb *cb;
1247
1248         cb = hl_cb_get(mmg, chunk->cb_handle);
1249         if (!cb) {
1250                 dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
1251                 return NULL;
1252         }
1253
1254         if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1255                 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1256                 goto release_cb;
1257         }
1258
1259         atomic_inc(&cb->cs_cnt);
1260
1261         return cb;
1262
1263 release_cb:
1264         hl_cb_put(cb);
1265         return NULL;
1266 }
1267
1268 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1269                 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1270 {
1271         struct hl_cs_job *job;
1272
1273         job = kzalloc(sizeof(*job), GFP_ATOMIC);
1274         if (!job)
1275                 job = kzalloc(sizeof(*job), GFP_KERNEL);
1276
1277         if (!job)
1278                 return NULL;
1279
1280         kref_init(&job->refcount);
1281         job->queue_type = queue_type;
1282         job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1283
1284         if (is_cb_patched(hdev, job))
1285                 INIT_LIST_HEAD(&job->userptr_list);
1286
1287         if (job->queue_type == QUEUE_TYPE_EXT)
1288                 INIT_WORK(&job->finish_work, job_wq_completion);
1289
1290         return job;
1291 }
1292
1293 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1294 {
1295         if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1296                 return CS_TYPE_SIGNAL;
1297         else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1298                 return CS_TYPE_WAIT;
1299         else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1300                 return CS_TYPE_COLLECTIVE_WAIT;
1301         else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1302                 return CS_RESERVE_SIGNALS;
1303         else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1304                 return CS_UNRESERVE_SIGNALS;
1305         else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
1306                 return CS_TYPE_ENGINE_CORE;
1307         else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
1308                 return CS_TYPE_FLUSH_PCI_HBW_WRITES;
1309         else
1310                 return CS_TYPE_DEFAULT;
1311 }
1312
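/*
 * hl_cs_sanity_checks - basic validation of a CS ioctl: padding bytes, device
 * operational state, staged-submission and sync-stream support, mutually
 * exclusive CS type flags and the number of chunks allowed for the CS type.
 */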
1313 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1314 {
1315         struct hl_device *hdev = hpriv->hdev;
1316         struct hl_ctx *ctx = hpriv->ctx;
1317         u32 cs_type_flags, num_chunks;
1318         enum hl_device_status status;
1319         enum hl_cs_type cs_type;
1320         bool is_sync_stream;
1321         int i;
1322
1323         for (i = 0 ; i < sizeof(args->in.pad) ; i++)
1324                 if (args->in.pad[i]) {
1325                         dev_dbg(hdev->dev, "Padding bytes must be 0\n");
1326                         return -EINVAL;
1327                 }
1328
1329         if (!hl_device_operational(hdev, &status)) {
1330                 return -EBUSY;
1331         }
1332
1333         if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1334                         !hdev->supports_staged_submission) {
1335                 dev_err(hdev->dev, "staged submission not supported");
1336                 return -EPERM;
1337         }
1338
1339         cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1340
1341         if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1342                 dev_err(hdev->dev,
1343                         "CS type flags are mutually exclusive, context %d\n",
1344                         ctx->asid);
1345                 return -EINVAL;
1346         }
1347
1348         cs_type = hl_cs_get_cs_type(cs_type_flags);
1349         num_chunks = args->in.num_chunks_execute;
1350
1351         is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
1352                         cs_type == CS_TYPE_COLLECTIVE_WAIT);
1353
1354         if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
1355                 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1356                 return -EINVAL;
1357         }
1358
1359         if (cs_type == CS_TYPE_DEFAULT) {
1360                 if (!num_chunks) {
1361                         dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
1362                         return -EINVAL;
1363                 }
1364         } else if (is_sync_stream && num_chunks != 1) {
1365                 dev_err(hdev->dev,
1366                         "Sync stream CS mandates one chunk only, context %d\n",
1367                         ctx->asid);
1368                 return -EINVAL;
1369         }
1370
1371         return 0;
1372 }
1373
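/*
 * hl_cs_copy_chunk_array - validate the number of chunks, allocate a kernel
 * copy of the user's CS chunk array and copy its contents from user space.
 */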
1374 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1375                                         struct hl_cs_chunk **cs_chunk_array,
1376                                         void __user *chunks, u32 num_chunks,
1377                                         struct hl_ctx *ctx)
1378 {
1379         u32 size_to_copy;
1380
1381         if (num_chunks > HL_MAX_JOBS_PER_CS) {
1382                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1383                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1384                 dev_err(hdev->dev,
1385                         "Number of chunks can NOT be larger than %d\n",
1386                         HL_MAX_JOBS_PER_CS);
1387                 return -EINVAL;
1388         }
1389
1390         *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1391                                         GFP_ATOMIC);
1392         if (!*cs_chunk_array)
1393                 *cs_chunk_array = kmalloc_array(num_chunks,
1394                                         sizeof(**cs_chunk_array), GFP_KERNEL);
1395         if (!*cs_chunk_array) {
1396                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1397                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1398                 return -ENOMEM;
1399         }
1400
1401         size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1402         if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1403                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1404                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1405                 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1406                 kfree(*cs_chunk_array);
1407                 return -EFAULT;
1408         }
1409
1410         return 0;
1411 }
1412
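/*
 * cs_staged_submission - mark a CS as part of a staged submission according to
 * the CS flags: set the first/last indications, record the staged sequence
 * and take an extra CS reference for every CS that does not get a completion.
 */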
1413 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1414                                 u64 sequence, u32 flags,
1415                                 u32 encaps_signal_handle)
1416 {
1417         if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1418                 return 0;
1419
1420         cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1421         cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1422
1423         if (cs->staged_first) {
1424                 /* Staged CS sequence is the first CS sequence */
1425                 INIT_LIST_HEAD(&cs->staged_cs_node);
1426                 cs->staged_sequence = cs->sequence;
1427
1428                 if (cs->encaps_signals)
1429                         cs->encaps_sig_hdl_id = encaps_signal_handle;
1430         } else {
1431                 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1432                  * under the cs_mirror_lock
1433                  */
1434                 cs->staged_sequence = sequence;
1435         }
1436
1437         /* Increment CS reference if needed */
1438         staged_cs_get(hdev, cs);
1439
1440         cs->staged_cs = true;
1441
1442         return 0;
1443 }
1444
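/*
 * get_stream_master_qid_mask - return a single-bit mask identifying the given
 * QID within the ASIC's stream master QID array, or 0 if the QID is not a
 * stream master.
 */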
1445 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1446 {
1447         int i;
1448
1449         for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1450                 if (qid == hdev->stream_master_qid_arr[i])
1451                         return BIT(i);
1452
1453         return 0;
1454 }
1455
1456 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1457                                 u32 num_chunks, u64 *cs_seq, u32 flags,
1458                                 u32 encaps_signals_handle, u32 timeout,
1459                                 u16 *signal_initial_sob_count)
1460 {
1461         bool staged_mid, int_queues_only = true, using_hw_queues = false;
1462         struct hl_device *hdev = hpriv->hdev;
1463         struct hl_cs_chunk *cs_chunk_array;
1464         struct hl_cs_counters_atomic *cntr;
1465         struct hl_ctx *ctx = hpriv->ctx;
1466         struct hl_cs_job *job;
1467         struct hl_cs *cs;
1468         struct hl_cb *cb;
1469         u64 user_sequence;
1470         u8 stream_master_qid_map = 0;
1471         int rc, i;
1472
1473         cntr = &hdev->aggregated_cs_counters;
1474         user_sequence = *cs_seq;
1475         *cs_seq = ULLONG_MAX;
1476
1477         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1478                         hpriv->ctx);
1479         if (rc)
1480                 goto out;
1481
1482         if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1483                         !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1484                 staged_mid = true;
1485         else
1486                 staged_mid = false;
1487
1488         rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1489                         staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1490                         timeout);
1491         if (rc)
1492                 goto free_cs_chunk_array;
1493
1494         *cs_seq = cs->sequence;
1495
1496         hl_debugfs_add_cs(cs);
1497
1498         rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1499                                                 encaps_signals_handle);
1500         if (rc)
1501                 goto free_cs_object;
1502
1503         /* If this is a staged submission we must return the staged sequence
1504          * rather than the internal CS sequence
1505          */
1506         if (cs->staged_cs)
1507                 *cs_seq = cs->staged_sequence;
1508
1509         /* Validate ALL the CS chunks before submitting the CS */
1510         for (i = 0 ; i < num_chunks ; i++) {
1511                 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1512                 enum hl_queue_type queue_type;
1513                 bool is_kernel_allocated_cb;
1514
1515                 rc = validate_queue_index(hdev, chunk, &queue_type,
1516                                                 &is_kernel_allocated_cb);
1517                 if (rc) {
1518                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1519                         atomic64_inc(&cntr->validation_drop_cnt);
1520                         goto free_cs_object;
1521                 }
1522
1523                 if (is_kernel_allocated_cb) {
1524                         cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
1525                         if (!cb) {
1526                                 atomic64_inc(
1527                                         &ctx->cs_counters.validation_drop_cnt);
1528                                 atomic64_inc(&cntr->validation_drop_cnt);
1529                                 rc = -EINVAL;
1530                                 goto free_cs_object;
1531                         }
1532                 } else {
1533                         cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1534                 }
1535
1536                 if (queue_type == QUEUE_TYPE_EXT ||
1537                                                 queue_type == QUEUE_TYPE_HW) {
1538                         int_queues_only = false;
1539
1540                         /*
1541                          * store which streams are being used for external/HW
1542                          * queues of this CS
1543                          */
1544                         if (hdev->supports_wait_for_multi_cs)
1545                                 stream_master_qid_map |=
1546                                         get_stream_master_qid_mask(hdev,
1547                                                         chunk->queue_index);
1548                 }
1549
1550                 if (queue_type == QUEUE_TYPE_HW)
1551                         using_hw_queues = true;
1552
1553                 job = hl_cs_allocate_job(hdev, queue_type,
1554                                                 is_kernel_allocated_cb);
1555                 if (!job) {
1556                         atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1557                         atomic64_inc(&cntr->out_of_mem_drop_cnt);
1558                         dev_err(hdev->dev, "Failed to allocate a new job\n");
1559                         rc = -ENOMEM;
1560                         if (is_kernel_allocated_cb)
1561                                 goto release_cb;
1562
1563                         goto free_cs_object;
1564                 }
1565
1566                 job->id = i + 1;
1567                 job->cs = cs;
1568                 job->user_cb = cb;
1569                 job->user_cb_size = chunk->cb_size;
1570                 job->hw_queue_id = chunk->queue_index;
1571
1572                 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1573                 cs->jobs_cnt++;
1574
1575                 list_add_tail(&job->cs_node, &cs->job_list);
1576
1577                 /*
1578                  * Increment the CS reference. When the CS reference reaches 0, the CS
1579                  * is done, can be signaled to the user, and all its resources freed.
1580                  * Only increment for a JOB on an external or H/W queue, because
1581                  * only for those JOBs do we get a completion.
1582                  */
1583                 if (cs_needs_completion(cs) &&
1584                         (job->queue_type == QUEUE_TYPE_EXT ||
1585                                 job->queue_type == QUEUE_TYPE_HW))
1586                         cs_get(cs);
1587
1588                 hl_debugfs_add_job(hdev, job);
1589
1590                 rc = cs_parser(hpriv, job);
1591                 if (rc) {
1592                         atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1593                         atomic64_inc(&cntr->parsing_drop_cnt);
1594                         dev_err(hdev->dev,
1595                                 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1596                                 cs->ctx->asid, cs->sequence, job->id, rc);
1597                         goto free_cs_object;
1598                 }
1599         }
1600
1601         /* We allow a CS with any queue type combination as long as it does
1602          * not get a completion
1603          */
1604         if (int_queues_only && cs_needs_completion(cs)) {
1605                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1606                 atomic64_inc(&cntr->validation_drop_cnt);
1607                 dev_err(hdev->dev,
1608                         "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1609                         cs->ctx->asid, cs->sequence);
1610                 rc = -EINVAL;
1611                 goto free_cs_object;
1612         }
1613
1614         if (using_hw_queues)
1615                 INIT_WORK(&cs->finish_work, cs_completion);
1616
1617         /*
1618          * store the (external/HW queues) streams used by the CS in the
1619          * fence object for multi-CS completion
1620          */
1621         if (hdev->supports_wait_for_multi_cs)
1622                 cs->fence->stream_master_qid_map = stream_master_qid_map;
1623
1624         rc = hl_hw_queue_schedule_cs(cs);
1625         if (rc) {
1626                 if (rc != -EAGAIN)
1627                         dev_err(hdev->dev,
1628                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1629                                 cs->ctx->asid, cs->sequence, rc);
1630                 goto free_cs_object;
1631         }
1632
1633         *signal_initial_sob_count = cs->initial_sob_count;
1634
1635         rc = HL_CS_STATUS_SUCCESS;
1636         goto put_cs;
1637
1638 release_cb:
1639         atomic_dec(&cb->cs_cnt);
1640         hl_cb_put(cb);
1641 free_cs_object:
1642         cs_rollback(hdev, cs);
1643         *cs_seq = ULLONG_MAX;
1644         /* The path below is both for good and erroneous exits */
1645 put_cs:
1646         /* We finished with the CS in this function, so put the ref */
1647         cs_put(cs);
1648 free_cs_chunk_array:
1649         kfree(cs_chunk_array);
1650 out:
1651         return rc;
1652 }
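
/*
 * Illustrative sketch (not part of the driver): in cs_ioctl_default() every job
 * on an external or H/W queue takes an extra reference on the CS, and the CS is
 * released only after each such job has dropped its reference on completion.
 * The bare refcount idiom behind that, with invented names, is roughly:
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_cs {
        struct kref refcount;
};

static void demo_cs_release(struct kref *ref)
{
        kfree(container_of(ref, struct demo_cs, refcount));
}

/* one get per job that will produce a completion ... */
static void demo_cs_add_completing_job(struct demo_cs *cs)
{
        kref_get(&cs->refcount);
}

/* ... and one put when that completion arrives */
static void demo_cs_job_completed(struct demo_cs *cs)
{
        kref_put(&cs->refcount, demo_cs_release);
}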
1653
1654 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1655                                 u64 *cs_seq)
1656 {
1657         struct hl_device *hdev = hpriv->hdev;
1658         struct hl_ctx *ctx = hpriv->ctx;
1659         bool need_soft_reset = false;
1660         int rc = 0, do_ctx_switch = 0;
1661         void __user *chunks;
1662         u32 num_chunks, tmp;
1663         u16 sob_count;
1664         int ret;
1665
1666         if (hdev->supports_ctx_switch)
1667                 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1668
1669         if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1670                 mutex_lock(&hpriv->restore_phase_mutex);
1671
1672                 if (do_ctx_switch) {
1673                         rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1674                         if (rc) {
1675                                 dev_err_ratelimited(hdev->dev,
1676                                         "Failed to switch to context %d, rejecting CS! %d\n",
1677                                         ctx->asid, rc);
1678                                 /*
1679                                  * If we timed out, or if the device is not IDLE
1680                                  * while we want to do a context switch (-EBUSY),
1681                                  * we need to soft-reset because the QMAN is
1682                                  * probably stuck. However, we can't call reset
1683                                  * here directly because of a deadlock, so we
1684                                  * need to do it at the very end of this
1685                                  * function.
1686                                  */
1687                                 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1688                                         need_soft_reset = true;
1689                                 mutex_unlock(&hpriv->restore_phase_mutex);
1690                                 goto out;
1691                         }
1692                 }
1693
1694                 hdev->asic_funcs->restore_phase_topology(hdev);
1695
1696                 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1697                 num_chunks = args->in.num_chunks_restore;
1698
1699                 if (!num_chunks) {
1700                         dev_dbg(hdev->dev,
1701                                 "Need to run restore phase but restore CS is empty\n");
1702                         rc = 0;
1703                 } else {
1704                         rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1705                                         cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
1706                 }
1707
1708                 mutex_unlock(&hpriv->restore_phase_mutex);
1709
1710                 if (rc) {
1711                         dev_err(hdev->dev,
1712                                 "Failed to submit restore CS for context %d (%d)\n",
1713                                 ctx->asid, rc);
1714                         goto out;
1715                 }
1716
1717                 /* Need to wait for restore completion before execution phase */
1718                 if (num_chunks) {
1719                         enum hl_cs_wait_status status;
1720 wait_again:
1721                         ret = _hl_cs_wait_ioctl(hdev, ctx,
1722                                         jiffies_to_usecs(hdev->timeout_jiffies),
1723                                         *cs_seq, &status, NULL);
1724                         if (ret) {
1725                                 if (ret == -ERESTARTSYS) {
1726                                         usleep_range(100, 200);
1727                                         goto wait_again;
1728                                 }
1729
1730                                 dev_err(hdev->dev,
1731                                         "Restore CS for context %d failed to complete %d\n",
1732                                         ctx->asid, ret);
1733                                 rc = -ENOEXEC;
1734                                 goto out;
1735                         }
1736                 }
1737
1738                 if (hdev->supports_ctx_switch)
1739                         ctx->thread_ctx_switch_wait_token = 1;
1740
1741         } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
1742                 rc = hl_poll_timeout_memory(hdev,
1743                         &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1744                         100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1745
1746                 if (rc == -ETIMEDOUT) {
1747                         dev_err(hdev->dev,
1748                                 "context switch phase timeout (%d)\n", tmp);
1749                         goto out;
1750                 }
1751         }
1752
1753 out:
1754         if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1755                 hl_device_reset(hdev, 0);
1756
1757         return rc;
1758 }
1759
1760 /*
1761  * hl_cs_signal_sob_wraparound_handler: handle the SOB value wraparound case.
1762  * If the SOB value reaches the max value, move to the other SOB reserved
1763  * for the queue.
1764  * @hdev: pointer to device structure
1765  * @q_idx: stream queue index
1766  * @hw_sob: the H/W SOB used in this signal CS.
1767  * @count: signals count
1768  * @encaps_sig: tells whether it's reservation for encaps signals or not.
1769  *
1770  * Note that this function must be called while hw_queues_lock is taken.
1771  */
1772 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1773                         struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1774
1775 {
1776         struct hl_sync_stream_properties *prop;
1777         struct hl_hw_sob *sob = *hw_sob, *other_sob;
1778         u8 other_sob_offset;
1779
1780         prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1781
1782         hw_sob_get(sob);
1783
1784         /* check for wraparound */
1785         if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1786                 /*
1787                  * Decrement as we reached the max value.
1788                  * The release function won't be called here as we've
1789                  * just incremented the refcount right before calling this
1790                  * function.
1791                  */
1792                 hw_sob_put_err(sob);
1793
1794                 /*
1795                  * Check the other SOB value; if it is still in use then fail,
1796                  * otherwise make the switch.
1797                  */
1798                 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1799                 other_sob = &prop->hw_sob[other_sob_offset];
1800
1801                 if (kref_read(&other_sob->kref) != 1) {
1802                         dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1803                                                                 q_idx);
1804                         return -EINVAL;
1805                 }
1806
1807                 /*
1808                  * next_sob_val always points to the next available signal
1809                  * in the SOB, so for encaps signals it will be the next one
1810                  * after reserving the required amount.
1811                  */
1812                 if (encaps_sig)
1813                         prop->next_sob_val = count + 1;
1814                 else
1815                         prop->next_sob_val = count;
1816
1817                 /* only two SOBs are currently in use */
1818                 prop->curr_sob_offset = other_sob_offset;
1819                 *hw_sob = other_sob;
1820
1821                 /*
1822                  * Check if other_sob needs a reset, and if so do it before
1823                  * using it for the reservation or the next signal CS.
1824                  * We do it here, for both the encaps and the regular signal
1825                  * CS cases, in order to avoid a possible race of two kref_put
1826                  * calls on the SOB, which can occur at the same time if we
1827                  * move the SOB reset (kref_put) to the cs_do_release function.
1828                  * In addition, if we have a combination of signal CS and
1829                  * encaps, and at the point we need to reset the SOB there are
1830                  * no more reservations and only signal CSs keep coming, then
1831                  * we need the signal CS to put the refcount and reset
1832                  * the SOB.
1833                  */
1834                 if (other_sob->need_reset)
1835                         hw_sob_put(other_sob);
1836
1837                 if (encaps_sig) {
1838                         /* set reset indication for the sob */
1839                         sob->need_reset = true;
1840                         hw_sob_get(other_sob);
1841                 }
1842
1843                 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1844                                 prop->curr_sob_offset, q_idx);
1845         } else {
1846                 prop->next_sob_val += count;
1847         }
1848
1849         return 0;
1850 }
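
/*
 * Illustrative sketch (not part of the driver): the handler above ping-pongs
 * between two SOBs reserved per stream, switching whenever the next batch of
 * signals would push the counter past its maximum, and refusing the switch if
 * the other SOB is still in use. A simplified userspace model of that decision,
 * with invented names and a made-up maximum value, could look like this:
 */
#include <stdbool.h>
#include <stdint.h>

#define MODEL_MAX_SOB_VAL       (1u << 15)      /* made-up limit, for illustration */
#define MODEL_RSVD_SOBS         2

struct sob_model {
        uint32_t next_val;                      /* next free signal value in the active SOB */
        uint32_t curr_offset;                   /* which reserved SOB is active */
        bool in_use[MODEL_RSVD_SOBS];
};

/* Reserve 'count' signals, switching SOBs when the active one would overflow. */
static int sob_model_reserve(struct sob_model *m, uint32_t count)
{
        uint32_t other;

        if (m->next_val + count < MODEL_MAX_SOB_VAL) {
                m->next_val += count;
                return 0;
        }

        other = (m->curr_offset + 1) % MODEL_RSVD_SOBS;
        if (m->in_use[other])
                return -1;      /* both SOBs busy: cannot switch */

        m->curr_offset = other;
        m->next_val = count;
        return 0;
}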
1851
1852 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1853                 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1854                 bool encaps_signals)
1855 {
1856         u64 *signal_seq_arr = NULL;
1857         u32 size_to_copy, signal_seq_arr_len;
1858         int rc = 0;
1859
1860         if (encaps_signals) {
1861                 *signal_seq = chunk->encaps_signal_seq;
1862                 return 0;
1863         }
1864
1865         signal_seq_arr_len = chunk->num_signal_seq_arr;
1866
1867         /* currently only one signal seq is supported */
1868         if (signal_seq_arr_len != 1) {
1869                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1870                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1871                 dev_err(hdev->dev,
1872                         "Wait for signal CS supports only one signal CS seq\n");
1873                 return -EINVAL;
1874         }
1875
1876         signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1877                                         sizeof(*signal_seq_arr),
1878                                         GFP_ATOMIC);
1879         if (!signal_seq_arr)
1880                 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1881                                         sizeof(*signal_seq_arr),
1882                                         GFP_KERNEL);
1883         if (!signal_seq_arr) {
1884                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1885                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1886                 return -ENOMEM;
1887         }
1888
1889         size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1890         if (copy_from_user(signal_seq_arr,
1891                                 u64_to_user_ptr(chunk->signal_seq_arr),
1892                                 size_to_copy)) {
1893                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1894                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1895                 dev_err(hdev->dev,
1896                         "Failed to copy signal seq array from user\n");
1897                 rc = -EFAULT;
1898                 goto out;
1899         }
1900
1901         /* currently it is guaranteed to have only one signal seq */
1902         *signal_seq = signal_seq_arr[0];
1903
1904 out:
1905         kfree(signal_seq_arr);
1906
1907         return rc;
1908 }
1909
1910 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1911                 struct hl_ctx *ctx, struct hl_cs *cs,
1912                 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1913 {
1914         struct hl_cs_counters_atomic *cntr;
1915         struct hl_cs_job *job;
1916         struct hl_cb *cb;
1917         u32 cb_size;
1918
1919         cntr = &hdev->aggregated_cs_counters;
1920
1921         job = hl_cs_allocate_job(hdev, q_type, true);
1922         if (!job) {
1923                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1924                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1925                 dev_err(hdev->dev, "Failed to allocate a new job\n");
1926                 return -ENOMEM;
1927         }
1928
1929         if (cs->type == CS_TYPE_WAIT)
1930                 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1931         else
1932                 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1933
1934         cb = hl_cb_kernel_create(hdev, cb_size,
1935                                 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1936         if (!cb) {
1937                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1938                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1939                 kfree(job);
1940                 return -EFAULT;
1941         }
1942
1943         job->id = 0;
1944         job->cs = cs;
1945         job->user_cb = cb;
1946         atomic_inc(&job->user_cb->cs_cnt);
1947         job->user_cb_size = cb_size;
1948         job->hw_queue_id = q_idx;
1949
1950         if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1951                         && cs->encaps_signals)
1952                 job->encaps_sig_wait_offset = encaps_signal_offset;
1953         /*
1954          * No need for parsing; the user CB is the patched CB.
1955          * We call hl_cb_destroy() for two reasons: we don't need the CB in
1956          * the CB idr anymore, and we need to decrement its refcount as it was
1957          * incremented inside hl_cb_kernel_create().
1958          */
1959         job->patched_cb = job->user_cb;
1960         job->job_cb_size = job->user_cb_size;
1961         hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
1962
1963         /* increment refcount as for external queues we get completion */
1964         cs_get(cs);
1965
1966         cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1967         cs->jobs_cnt++;
1968
1969         list_add_tail(&job->cs_node, &cs->job_list);
1970
1971         hl_debugfs_add_job(hdev, job);
1972
1973         return 0;
1974 }
1975
1976 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1977                                 u32 q_idx, u32 count,
1978                                 u32 *handle_id, u32 *sob_addr,
1979                                 u32 *signals_count)
1980 {
1981         struct hw_queue_properties *hw_queue_prop;
1982         struct hl_sync_stream_properties *prop;
1983         struct hl_device *hdev = hpriv->hdev;
1984         struct hl_cs_encaps_sig_handle *handle;
1985         struct hl_encaps_signals_mgr *mgr;
1986         struct hl_hw_sob *hw_sob;
1987         int hdl_id;
1988         int rc = 0;
1989
1990         if (count >= HL_MAX_SOB_VAL) {
1991                 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
1992                                                 count);
1993                 rc = -EINVAL;
1994                 goto out;
1995         }
1996
1997         if (q_idx >= hdev->asic_prop.max_queues) {
1998                 dev_err(hdev->dev, "Queue index %d is invalid\n",
1999                         q_idx);
2000                 rc = -EINVAL;
2001                 goto out;
2002         }
2003
2004         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2005
2006         if (!hw_queue_prop->supports_sync_stream) {
2007                 dev_err(hdev->dev,
2008                         "Queue index %d does not support sync stream operations\n",
2009                                                                         q_idx);
2010                 rc = -EINVAL;
2011                 goto out;
2012         }
2013
2014         prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2015
2016         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
2017         if (!handle) {
2018                 rc = -ENOMEM;
2019                 goto out;
2020         }
2021
2022         handle->count = count;
2023
2024         hl_ctx_get(hpriv->ctx);
2025         handle->ctx = hpriv->ctx;
2026         mgr = &hpriv->ctx->sig_mgr;
2027
2028         spin_lock(&mgr->lock);
2029         hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
2030         spin_unlock(&mgr->lock);
2031
2032         if (hdl_id < 0) {
2033                 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
2034                 rc = -EINVAL;
2035                 goto put_ctx;
2036         }
2037
2038         handle->id = hdl_id;
2039         handle->q_idx = q_idx;
2040         handle->hdev = hdev;
2041         kref_init(&handle->refcount);
2042
2043         hdev->asic_funcs->hw_queues_lock(hdev);
2044
2045         hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2046
2047         /*
2048          * Increment the SOB value by the user-requested count in order
2049          * to reserve those signals.
2050          * Check that the amount of signals to reserve does not exceed the max
2051          * SOB value; if it does, switch to the other SOB.
2052          */
2053         rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
2054                                                                 true);
2055         if (rc) {
2056                 dev_err(hdev->dev, "Failed to switch SOB\n");
2057                 hdev->asic_funcs->hw_queues_unlock(hdev);
2058                 rc = -EINVAL;
2059                 goto remove_idr;
2060         }
2061         /* Set the hw_sob in the handle after calling the SOB wraparound handler,
2062          * since the SOB could have changed.
2063          */
2064         handle->hw_sob = hw_sob;
2065
2066         /* store the current sob value for unreserve validity check, and
2067          * signal offset support
2068          */
2069         handle->pre_sob_val = prop->next_sob_val - handle->count;
2070
2071         handle->cs_seq = ULLONG_MAX;
2072
2073         *signals_count = prop->next_sob_val;
2074         hdev->asic_funcs->hw_queues_unlock(hdev);
2075
2076         *sob_addr = handle->hw_sob->sob_addr;
2077         *handle_id = hdl_id;
2078
2079         dev_dbg(hdev->dev,
2080                 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
2081                         hw_sob->sob_id, handle->hw_sob->sob_addr,
2082                         prop->next_sob_val - 1, q_idx, hdl_id);
2083         goto out;
2084
2085 remove_idr:
2086         spin_lock(&mgr->lock);
2087         idr_remove(&mgr->handles, hdl_id);
2088         spin_unlock(&mgr->lock);
2089
2090 put_ctx:
2091         hl_ctx_put(handle->ctx);
2092         kfree(handle);
2093
2094 out:
2095         return rc;
2096 }
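
/*
 * Illustrative sketch (not part of the driver): the reservation path above
 * allocates the handle with GFP_KERNEL outside the lock and then takes the
 * spinlock only around idr_alloc() with GFP_ATOMIC, so the critical section
 * never sleeps. The bare pattern, with invented types and names, is roughly:
 */
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_handle {
        int id;
};

static int demo_handle_create(struct idr *idr, spinlock_t *lock,
                              struct demo_handle **out)
{
        struct demo_handle *h;
        int id;

        h = kzalloc(sizeof(*h), GFP_KERNEL);    /* may sleep: done outside the lock */
        if (!h)
                return -ENOMEM;

        spin_lock(lock);
        id = idr_alloc(idr, h, 1, 0, GFP_ATOMIC);       /* must not sleep under the lock */
        spin_unlock(lock);

        if (id < 0) {
                kfree(h);
                return id;
        }

        h->id = id;
        *out = h;
        return 0;
}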
2097
2098 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
2099 {
2100         struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
2101         struct hl_sync_stream_properties *prop;
2102         struct hl_device *hdev = hpriv->hdev;
2103         struct hl_encaps_signals_mgr *mgr;
2104         struct hl_hw_sob *hw_sob;
2105         u32 q_idx, sob_addr;
2106         int rc = 0;
2107
2108         mgr = &hpriv->ctx->sig_mgr;
2109
2110         spin_lock(&mgr->lock);
2111         encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
2112         if (encaps_sig_hdl) {
2113                 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
2114                                 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
2115                                         encaps_sig_hdl->count);
2116
2117                 hdev->asic_funcs->hw_queues_lock(hdev);
2118
2119                 q_idx = encaps_sig_hdl->q_idx;
2120                 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2121                 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2122                 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
2123
2124                 /* Check if sob_val got out of sync due to other
2125                  * signal submission requests which were handled
2126                  * between the reserve-unreserve calls, or due to a SOB
2127                  * switch upon reaching the SOB max value.
2128                  */
2129                 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
2130                                 != prop->next_sob_val ||
2131                                 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
2132                         dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
2133                                 encaps_sig_hdl->pre_sob_val,
2134                                 (prop->next_sob_val - encaps_sig_hdl->count));
2135
2136                         hdev->asic_funcs->hw_queues_unlock(hdev);
2137                         rc = -EINVAL;
2138                         goto out;
2139                 }
2140
2141                 /*
2142                  * Decrement the SOB value by the user-requested count in order
2143                  * to unreserve those signals.
2144                  */
2145                 prop->next_sob_val -= encaps_sig_hdl->count;
2146
2147                 hdev->asic_funcs->hw_queues_unlock(hdev);
2148
2149                 hw_sob_put(hw_sob);
2150
2151                 /* Release the id and free allocated memory of the handle */
2152                 idr_remove(&mgr->handles, handle_id);
2153                 hl_ctx_put(encaps_sig_hdl->ctx);
2154                 kfree(encaps_sig_hdl);
2155         } else {
2156                 rc = -EINVAL;
2157                 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
2158         }
2159 out:
2160         spin_unlock(&mgr->lock);
2161
2162         return rc;
2163 }
2164
2165 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
2166                                 void __user *chunks, u32 num_chunks,
2167                                 u64 *cs_seq, u32 flags, u32 timeout,
2168                                 u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
2169 {
2170         struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
2171         bool handle_found = false, is_wait_cs = false,
2172                         wait_cs_submitted = false,
2173                         cs_encaps_signals = false;
2174         struct hl_cs_chunk *cs_chunk_array, *chunk;
2175         bool staged_cs_with_encaps_signals = false;
2176         struct hw_queue_properties *hw_queue_prop;
2177         struct hl_device *hdev = hpriv->hdev;
2178         struct hl_cs_compl *sig_waitcs_cmpl;
2179         u32 q_idx, collective_engine_id = 0;
2180         struct hl_cs_counters_atomic *cntr;
2181         struct hl_fence *sig_fence = NULL;
2182         struct hl_ctx *ctx = hpriv->ctx;
2183         enum hl_queue_type q_type;
2184         struct hl_cs *cs;
2185         u64 signal_seq;
2186         int rc;
2187
2188         cntr = &hdev->aggregated_cs_counters;
2189         *cs_seq = ULLONG_MAX;
2190
2191         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
2192                         ctx);
2193         if (rc)
2194                 goto out;
2195
2196         /* currently it is guaranteed to have only one chunk */
2197         chunk = &cs_chunk_array[0];
2198
2199         if (chunk->queue_index >= hdev->asic_prop.max_queues) {
2200                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2201                 atomic64_inc(&cntr->validation_drop_cnt);
2202                 dev_err(hdev->dev, "Queue index %d is invalid\n",
2203                         chunk->queue_index);
2204                 rc = -EINVAL;
2205                 goto free_cs_chunk_array;
2206         }
2207
2208         q_idx = chunk->queue_index;
2209         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2210         q_type = hw_queue_prop->type;
2211
2212         if (!hw_queue_prop->supports_sync_stream) {
2213                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2214                 atomic64_inc(&cntr->validation_drop_cnt);
2215                 dev_err(hdev->dev,
2216                         "Queue index %d does not support sync stream operations\n",
2217                         q_idx);
2218                 rc = -EINVAL;
2219                 goto free_cs_chunk_array;
2220         }
2221
2222         if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
2223                 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
2224                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2225                         atomic64_inc(&cntr->validation_drop_cnt);
2226                         dev_err(hdev->dev,
2227                                 "Queue index %d is invalid\n", q_idx);
2228                         rc = -EINVAL;
2229                         goto free_cs_chunk_array;
2230                 }
2231
2232                 if (!hdev->nic_ports_mask) {
2233                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2234                         atomic64_inc(&cntr->validation_drop_cnt);
2235                         dev_err(hdev->dev,
2236                                 "Collective operations not supported when NIC ports are disabled");
2237                         rc = -EINVAL;
2238                         goto free_cs_chunk_array;
2239                 }
2240
2241                 collective_engine_id = chunk->collective_engine_id;
2242         }
2243
2244         is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2245                         cs_type == CS_TYPE_COLLECTIVE_WAIT);
2246
2247         cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2248
2249         if (is_wait_cs) {
2250                 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2251                                 ctx, cs_encaps_signals);
2252                 if (rc)
2253                         goto free_cs_chunk_array;
2254
2255                 if (cs_encaps_signals) {
2256                         /* Check if the CS sequence has an encapsulated
2257                          * signals handle
2258                          */
2259                         struct idr *idp;
2260                         u32 id;
2261
2262                         spin_lock(&ctx->sig_mgr.lock);
2263                         idp = &ctx->sig_mgr.handles;
2264                         idr_for_each_entry(idp, encaps_sig_hdl, id) {
2265                                 if (encaps_sig_hdl->cs_seq == signal_seq) {
2266                                         /* get refcount to protect removing this handle from idr,
2267                                          * needed when multiple wait cs are used with offset
2268                                          * to wait on reserved encaps signals.
2269                                          * Since kref_put of this handle is executed outside the
2270                                          * current lock, it is possible that the handle refcount
2271                                          * is 0 but it has yet to be removed from the list. In this
2272                                          * case we need to consider the handle as not valid.
2273                                          */
2274                                         if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
2275                                                 handle_found = true;
2276                                         break;
2277                                 }
2278                         }
2279                         spin_unlock(&ctx->sig_mgr.lock);
2280
2281                         if (!handle_found) {
2282                                 /* treat as signal CS already finished */
2283                                 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2284                                                 signal_seq);
2285                                 rc = 0;
2286                                 goto free_cs_chunk_array;
2287                         }
2288
2289                         /* validate also the signal offset value */
2290                         if (chunk->encaps_signal_offset >
2291                                         encaps_sig_hdl->count) {
2292                                 dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
2293                                                 chunk->encaps_signal_offset,
2294                                                 encaps_sig_hdl->count);
2295                                 rc = -EINVAL;
2296                                 goto free_cs_chunk_array;
2297                         }
2298                 }
2299
2300                 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2301                 if (IS_ERR(sig_fence)) {
2302                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2303                         atomic64_inc(&cntr->validation_drop_cnt);
2304                         dev_err(hdev->dev,
2305                                 "Failed to get signal CS with seq 0x%llx\n",
2306                                 signal_seq);
2307                         rc = PTR_ERR(sig_fence);
2308                         goto free_cs_chunk_array;
2309                 }
2310
2311                 if (!sig_fence) {
2312                         /* signal CS already finished */
2313                         rc = 0;
2314                         goto free_cs_chunk_array;
2315                 }
2316
2317                 sig_waitcs_cmpl =
2318                         container_of(sig_fence, struct hl_cs_compl, base_fence);
2319
2320                 staged_cs_with_encaps_signals = !!
2321                                 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2322                                 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2323
2324                 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2325                                 !staged_cs_with_encaps_signals) {
2326                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2327                         atomic64_inc(&cntr->validation_drop_cnt);
2328                         dev_err(hdev->dev,
2329                                 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2330                                 signal_seq);
2331                         hl_fence_put(sig_fence);
2332                         rc = -EINVAL;
2333                         goto free_cs_chunk_array;
2334                 }
2335
2336                 if (completion_done(&sig_fence->completion)) {
2337                         /* signal CS already finished */
2338                         hl_fence_put(sig_fence);
2339                         rc = 0;
2340                         goto free_cs_chunk_array;
2341                 }
2342         }
2343
2344         rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2345         if (rc) {
2346                 if (is_wait_cs)
2347                         hl_fence_put(sig_fence);
2348
2349                 goto free_cs_chunk_array;
2350         }
2351
2352         /*
2353          * Save the signal CS fence for later initialization right before
2354          * hanging the wait CS on the queue.
2355          * For the encaps signals case, we save the CS sequence and handle pointer
2356          * for later initialization.
2357          */
2358         if (is_wait_cs) {
2359                 cs->signal_fence = sig_fence;
2360                 /* Store the handle pointer, so we don't have to
2361                  * look for it again later in the flow,
2362                  * when we need to set the SOB info in hw_queue.
2363                  */
2364                 if (cs->encaps_signals)
2365                         cs->encaps_sig_hdl = encaps_sig_hdl;
2366         }
2367
2368         hl_debugfs_add_cs(cs);
2369
2370         *cs_seq = cs->sequence;
2371
2372         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2373                 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2374                                 q_idx, chunk->encaps_signal_offset);
2375         else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2376                 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2377                                 cs, q_idx, collective_engine_id,
2378                                 chunk->encaps_signal_offset);
2379         else {
2380                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2381                 atomic64_inc(&cntr->validation_drop_cnt);
2382                 rc = -EINVAL;
2383         }
2384
2385         if (rc)
2386                 goto free_cs_object;
2387
2388         if (q_type == QUEUE_TYPE_HW)
2389                 INIT_WORK(&cs->finish_work, cs_completion);
2390
2391         rc = hl_hw_queue_schedule_cs(cs);
2392         if (rc) {
2393                 /* In case the wait CS failed here, it means the signal CS has
2394                  * already completed. We want to free all of its related objects,
2395                  * but we don't want to fail the ioctl.
2396                  */
2397                 if (is_wait_cs)
2398                         rc = 0;
2399                 else if (rc != -EAGAIN)
2400                         dev_err(hdev->dev,
2401                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2402                                 ctx->asid, cs->sequence, rc);
2403                 goto free_cs_object;
2404         }
2405
2406         *signal_sob_addr_offset = cs->sob_addr_offset;
2407         *signal_initial_sob_count = cs->initial_sob_count;
2408
2409         rc = HL_CS_STATUS_SUCCESS;
2410         if (is_wait_cs)
2411                 wait_cs_submitted = true;
2412         goto put_cs;
2413
2414 free_cs_object:
2415         cs_rollback(hdev, cs);
2416         *cs_seq = ULLONG_MAX;
2417         /* The path below is both for good and erroneous exits */
2418 put_cs:
2419         /* We finished with the CS in this function, so put the ref */
2420         cs_put(cs);
2421 free_cs_chunk_array:
2422         if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
2423                 kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
2424         kfree(cs_chunk_array);
2425 out:
2426         return rc;
2427 }
2428
2429 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
2430                                                 u32 num_engine_cores, u32 core_command)
2431 {
2432         int rc;
2433         struct hl_device *hdev = hpriv->hdev;
2434         void __user *engine_cores_arr;
2435         u32 *cores;
2436
2437         if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
2438                 dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
2439                 return -EINVAL;
2440         }
2441
2442         if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
2443                 dev_err(hdev->dev, "Engine core command is invalid\n");
2444                 return -EINVAL;
2445         }
2446
2447         engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
2448         cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
2449         if (!cores)
2450                 return -ENOMEM;
2451
2452         if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
2453                 dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
2454                 kfree(cores);
2455                 return -EFAULT;
2456         }
2457
2458         rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
2459         kfree(cores);
2460
2461         return rc;
2462 }
2463
2464 static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
2465 {
2466         struct hl_device *hdev = hpriv->hdev;
2467         struct asic_fixed_properties *prop = &hdev->asic_prop;
2468
2469         if (!prop->hbw_flush_reg) {
2470                 dev_dbg(hdev->dev, "HBW flush is not supported\n");
2471                 return -EOPNOTSUPP;
2472         }
2473
2474         RREG32(prop->hbw_flush_reg);
2475
2476         return 0;
2477 }
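
/*
 * Illustrative sketch (not part of the driver): the RREG32() above relies on the
 * PCI ordering rule that a read from the device forces earlier posted writes on
 * that path to complete before the read returns. A generic form of the idiom,
 * with an invented register offset parameter, would be:
 */
#include <linux/io.h>

static void demo_flush_posted_writes(void __iomem *regs, unsigned long flush_reg_off)
{
        /* the value is irrelevant; the read itself provides the ordering */
        (void)readl(regs + flush_reg_off);
}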
2478
2479 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2480 {
2481         union hl_cs_args *args = data;
2482         enum hl_cs_type cs_type = 0;
2483         u64 cs_seq = ULONG_MAX;
2484         void __user *chunks;
2485         u32 num_chunks, flags, timeout,
2486                 signals_count = 0, sob_addr = 0, handle_id = 0;
2487         u16 sob_initial_count = 0;
2488         int rc;
2489
2490         rc = hl_cs_sanity_checks(hpriv, args);
2491         if (rc)
2492                 goto out;
2493
2494         rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2495         if (rc)
2496                 goto out;
2497
2498         cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2499                                         ~HL_CS_FLAGS_FORCE_RESTORE);
2500         chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2501         num_chunks = args->in.num_chunks_execute;
2502         flags = args->in.cs_flags;
2503
2504         /* In case this is a staged CS, user should supply the CS sequence */
2505         if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2506                         !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2507                 cs_seq = args->in.seq;
2508
2509         timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2510                         ? msecs_to_jiffies(args->in.timeout * 1000)
2511                         : hpriv->hdev->timeout_jiffies;
2512
2513         switch (cs_type) {
2514         case CS_TYPE_SIGNAL:
2515         case CS_TYPE_WAIT:
2516         case CS_TYPE_COLLECTIVE_WAIT:
2517                 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2518                                         &cs_seq, args->in.cs_flags, timeout,
2519                                         &sob_addr, &sob_initial_count);
2520                 break;
2521         case CS_RESERVE_SIGNALS:
2522                 rc = cs_ioctl_reserve_signals(hpriv,
2523                                         args->in.encaps_signals_q_idx,
2524                                         args->in.encaps_signals_count,
2525                                         &handle_id, &sob_addr, &signals_count);
2526                 break;
2527         case CS_UNRESERVE_SIGNALS:
2528                 rc = cs_ioctl_unreserve_signals(hpriv,
2529                                         args->in.encaps_sig_handle_id);
2530                 break;
2531         case CS_TYPE_ENGINE_CORE:
2532                 rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
2533                                 args->in.num_engine_cores, args->in.core_command);
2534                 break;
2535         case CS_TYPE_FLUSH_PCI_HBW_WRITES:
2536                 rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
2537                 break;
2538         default:
2539                 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2540                                                 args->in.cs_flags,
2541                                                 args->in.encaps_sig_handle_id,
2542                                                 timeout, &sob_initial_count);
2543                 break;
2544         }
2545 out:
2546         if (rc != -EAGAIN) {
2547                 memset(args, 0, sizeof(*args));
2548
2549                 switch (cs_type) {
2550                 case CS_RESERVE_SIGNALS:
2551                         args->out.handle_id = handle_id;
2552                         args->out.sob_base_addr_offset = sob_addr;
2553                         args->out.count = signals_count;
2554                         break;
2555                 case CS_TYPE_SIGNAL:
2556                         args->out.sob_base_addr_offset = sob_addr;
2557                         args->out.sob_count_before_submission = sob_initial_count;
2558                         args->out.seq = cs_seq;
2559                         break;
2560                 case CS_TYPE_DEFAULT:
2561                         args->out.sob_count_before_submission = sob_initial_count;
2562                         args->out.seq = cs_seq;
2563                         break;
2564                 default:
2565                         args->out.seq = cs_seq;
2566                         break;
2567                 }
2568
2569                 args->out.status = rc;
2570         }
2571
2572         return rc;
2573 }
2574
2575 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2576                                 enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
2577 {
2578         struct hl_device *hdev = ctx->hdev;
2579         ktime_t timestamp_kt;
2580         long completion_rc;
2581         int rc = 0, error;
2582
2583         if (IS_ERR(fence)) {
2584                 rc = PTR_ERR(fence);
2585                 if (rc == -EINVAL)
2586                         dev_notice_ratelimited(hdev->dev,
2587                                 "Can't wait on CS %llu because current CS is at seq %llu\n",
2588                                 seq, ctx->cs_sequence);
2589                 return rc;
2590         }
2591
2592         if (!fence) {
2593                 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
2594                         dev_dbg(hdev->dev,
2595                                 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2596                                 seq, ctx->cs_sequence);
2597                         *status = CS_WAIT_STATUS_GONE;
2598                         return 0;
2599                 }
2600
2601                 completion_rc = 1;
2602                 goto report_results;
2603         }
2604
2605         if (!timeout_us) {
2606                 completion_rc = completion_done(&fence->completion);
2607         } else {
2608                 unsigned long timeout;
2609
2610                 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2611                                 timeout_us : usecs_to_jiffies(timeout_us);
2612                 completion_rc =
2613                         wait_for_completion_interruptible_timeout(
2614                                 &fence->completion, timeout);
2615         }
2616
2617         error = fence->error;
2618         timestamp_kt = fence->timestamp;
2619
2620 report_results:
2621         if (completion_rc > 0) {
2622                 *status = CS_WAIT_STATUS_COMPLETED;
2623                 if (timestamp)
2624                         *timestamp = ktime_to_ns(timestamp_kt);
2625         } else {
2626                 *status = CS_WAIT_STATUS_BUSY;
2627         }
2628
2629         if (completion_rc == -ERESTARTSYS)
2630                 rc = completion_rc;
2631         else if (error == -ETIMEDOUT || error == -EIO)
2632                 rc = error;
2633
2634         return rc;
2635 }
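
/*
 * Illustrative sketch (not part of the driver): hl_wait_for_fence() folds the
 * three outcomes of wait_for_completion_interruptible_timeout() - a positive
 * remaining-time value on completion, 0 on timeout, -ERESTARTSYS on a signal -
 * into a wait status plus an error code. A minimal version of that decode step,
 * with invented status names, could be:
 */
#include <linux/completion.h>
#include <linux/errno.h>

enum demo_wait_status { DEMO_WAIT_BUSY, DEMO_WAIT_COMPLETED };

static int demo_wait(struct completion *comp, unsigned long timeout_jiffies,
                     enum demo_wait_status *status)
{
        long rc = wait_for_completion_interruptible_timeout(comp, timeout_jiffies);

        if (rc > 0) {
                *status = DEMO_WAIT_COMPLETED;
                return 0;
        }

        *status = DEMO_WAIT_BUSY;

        /* 0 means the wait timed out; only a signal is reported as an error here */
        return rc == -ERESTARTSYS ? -ERESTARTSYS : 0;
}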
2636
2637 /*
2638  * hl_cs_poll_fences - iterate CS fences to check for CS completion
2639  *
2640  * @mcs_data: multi-CS internal data
2641  * @mcs_compl: multi-CS completion structure
2642  *
2643  * @return 0 on success, otherwise a non-zero error code
2644  *
2645  * The function iterates over all CS sequences in the list and sets a bit in
2646  * completion_bitmap for each completed CS.
2647  * While iterating, the function sets the stream map of each fence in the fence
2648  * array in the completion QID stream map, to be used by CSs to perform
2649  * completion to the multi-CS context.
2650  * This function shall be called after taking a context ref.
2651  */
2652 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
2653 {
2654         struct hl_fence **fence_ptr = mcs_data->fence_arr;
2655         struct hl_device *hdev = mcs_data->ctx->hdev;
2656         int i, rc, arr_len = mcs_data->arr_len;
2657         u64 *seq_arr = mcs_data->seq_arr;
2658         ktime_t max_ktime, first_cs_time;
2659         enum hl_cs_wait_status status;
2660
2661         memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
2662
2663         /* get all fences under the same lock */
2664         rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2665         if (rc)
2666                 return rc;
2667
2668         /*
2669          * Re-initialize the completion here to handle 2 possible cases:
2670          * 1. A CS will complete the multi-CS prior to clearing the completion, in
2671          *    which case the fence iteration is guaranteed to catch the CS completion.
2672          * 2. The completion will occur after the re-init of the completion,
2673          *    in which case we will wake up immediately in wait_for_completion.
2674          */
2675         reinit_completion(&mcs_compl->completion);
2676
2677         /*
2678          * Set to the maximum time to verify the timestamp is valid: if this value
2679          * is maintained at the end, no timestamp was updated.
2680          */
2681         max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2682         first_cs_time = max_ktime;
2683
2684         for (i = 0; i < arr_len; i++, fence_ptr++) {
2685                 struct hl_fence *fence = *fence_ptr;
2686
2687                 /*
2688                  * In order to prevent a case where we wait until timeout even though a CS associated
2689                  * with the multi-CS actually completed, we do things in the below order:
2690                  * 1. For each fence, set its QID map in the multi-CS completion QID map. This way
2691                  *    any CS can, potentially, complete the multi-CS for the specific QID (note
2692                  *    that once the completion is initialized, calling complete* and then waiting on the
2693                  *    completion will cause it to return at once).
2694                  * 2. Only after allowing multi-CS completion for the specific QID do we check whether
2695                  *    the specific CS already completed (and thus the wait-for-completion part will
2696                  *    be skipped). If the CS has not completed, it is guaranteed that the completing CS
2697                  *    will wake up the completion.
2698                  */
2699                 if (fence)
2700                         mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
2701
2702                 /*
2703                  * function won't sleep as it is called with timeout 0 (i.e.
2704                  * poll the fence)
2705                  */
2706                 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
2707                 if (rc) {
2708                         dev_err(hdev->dev,
2709                                 "wait_for_fence error :%d for CS seq %llu\n",
2710                                                                 rc, seq_arr[i]);
2711                         break;
2712                 }
2713
2714                 switch (status) {
2715                 case CS_WAIT_STATUS_BUSY:
2716                         /* CS has not finished; QID to wait on is already stored */
2717                         break;
2718                 case CS_WAIT_STATUS_COMPLETED:
2719                         /*
2720                          * Use mcs_handling_done to avoid the possibility of returning
2721                          * to the user, indicating the CS completed, before it has finished
2722                          * all of its mcs handling, in order to avoid a race the next time
2723                          * the user waits for mcs.
2724                          * Note: when reaching this case the fence is definitely not NULL,
2725                          *       but the NULL check was added to overcome static analysis.
2726                          */
2727                         if (fence && !fence->mcs_handling_done) {
2728                                 /*
2729                                  * In case the multi-CS is completed but the MCS handling is not
2730                                  * done, we "complete" the multi-CS to prevent it from waiting
2731                                  * until time-out, and the "multi-CS handling done" will have
2732                                  * another chance at the next iteration.
2733                                  */
2734                                 complete_all(&mcs_compl->completion);
2735                                 break;
2736                         }
2737
2738                         mcs_data->completion_bitmap |= BIT(i);
2739                         /*
2740                          * For all completed CSs we take the earliest timestamp.
2741                          * For this we have to validate that the timestamp is the
2742                          * earliest of all timestamps so far.
2743                          */
2744                         if (fence && mcs_data->update_ts &&
2745                                         (ktime_compare(fence->timestamp, first_cs_time) < 0))
2746                                 first_cs_time = fence->timestamp;
2747                         break;
2748                 case CS_WAIT_STATUS_GONE:
2749                         mcs_data->update_ts = false;
2750                         mcs_data->gone_cs = true;
2751                         /*
2752                          * It is possible to get old sequence numbers from the user
2753                          * which relate to already completed CSs whose fences are
2754                          * already gone. In this case, the CS is set as completed but
2755                          * there is no need to consider its QID for mcs completion.
2756                          */
2757                         mcs_data->completion_bitmap |= BIT(i);
2758                         break;
2759                 default:
2760                         dev_err(hdev->dev, "Invalid fence status\n");
2761                         rc = -EINVAL;
2762                         break;
2763                 }
2764
2765         }
2766
2767         hl_fences_put(mcs_data->fence_arr, arr_len);
2768
2769         if (mcs_data->update_ts &&
2770                         (ktime_compare(first_cs_time, max_ktime) != 0))
2771                 mcs_data->timestamp = ktime_to_ns(first_cs_time);
2772
2773         return rc;
2774 }
2775
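/*
 * _hl_cs_wait_ioctl - wait for a single CS to complete
 *
 * @hdev: pointer to habanalabs device structure
 * @ctx: pointer to the context of the user process
 * @timeout_us: wait timeout in microseconds
 * @seq: sequence number of the CS to wait on
 * @status: returned CS wait status
 * @timestamp: returned CS completion timestamp (0 if not available)
 *
 * @return 0 on success, otherwise non-zero error code
 *
 * Looks up the fence of the given sequence number in the context and waits
 * on it for up to the given timeout.
 */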
2776 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
2777                                 enum hl_cs_wait_status *status, s64 *timestamp)
2778 {
2779         struct hl_fence *fence;
2780         int rc = 0;
2781
2782         if (timestamp)
2783                 *timestamp = 0;
2784
2785         hl_ctx_get(ctx);
2786
2787         fence = hl_ctx_get_fence(ctx, seq);
2788
2789         rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2790         hl_fence_put(fence);
2791         hl_ctx_put(ctx);
2792
2793         return rc;
2794 }
2795
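/*
 * hl_usecs64_to_jiffies - convert a 64-bit microseconds value to jiffies
 *
 * Values that fit in 32 bits are converted directly with usecs_to_jiffies().
 * Larger values are converted through nanoseconds and clamped to U64_MAX so
 * the multiplication by NSEC_PER_USEC cannot overflow.
 */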
2796 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
2797 {
2798         if (usecs <= U32_MAX)
2799                 return usecs_to_jiffies(usecs);
2800
2801         /*
2802          * If the value in nanoseconds is larger than 64 bit, use the largest
2803          * 64 bit value.
2804          */
2805         if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
2806                 return nsecs_to_jiffies(U64_MAX);
2807
2808         return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
2809 }
2810
2811 /*
2812  * hl_wait_multi_cs_completion_init - init completion structure
2813  *
2814  * @hdev: pointer to habanalabs device structure
2815  *
2816  * @return valid completion struct pointer on success, otherwise error pointer
2817  *
2818  * Up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver.
2819  * The function gets the first available completion (by marking it "used")
2820  * and initializes its values. The stream master QID map of the returned
2821  * completion is cleared; it is filled incrementally while polling the fences
2822  * of the multi-CS.
2823  */
2824 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
2825 {
2826         struct multi_cs_completion *mcs_compl;
2827         int i;
2828
2829         /* find free multi_cs completion structure */
2830         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2831                 mcs_compl = &hdev->multi_cs_completion[i];
2832                 spin_lock(&mcs_compl->lock);
2833                 if (!mcs_compl->used) {
2834                         mcs_compl->used = 1;
2835                         mcs_compl->timestamp = 0;
2836                         /*
2837                          * Init the QID map to 0 to avoid completion by any CS. The actual
2838                          * QID map of the multi-CS CSs will be set incrementally at a later stage.
2839                          */
2840                         mcs_compl->stream_master_qid_map = 0;
2841                         spin_unlock(&mcs_compl->lock);
2842                         break;
2843                 }
2844                 spin_unlock(&mcs_compl->lock);
2845         }
2846
2847         if (i == MULTI_CS_MAX_USER_CTX) {
2848                 dev_err(hdev->dev, "no available multi-CS completion structure\n");
2849                 return ERR_PTR(-ENOMEM);
2850         }
2851         return mcs_compl;
2852 }
2853
2854 /*
2855  * hl_wait_multi_cs_completion_fini - return completion structure and set as
2856  *                                    unused
2857  *
2858  * @mcs_compl: pointer to the completion structure
2859  */
2860 static void hl_wait_multi_cs_completion_fini(
2861                                         struct multi_cs_completion *mcs_compl)
2862 {
2863         /*
2864          * Free the completion structure; do it under the lock to be in sync
2865          * with the thread that signals completion.
2866          */
2867         spin_lock(&mcs_compl->lock);
2868         mcs_compl->used = 0;
2869         spin_unlock(&mcs_compl->lock);
2870 }
2871
2872 /*
2873  * hl_wait_multi_cs_completion - wait for first CS to complete
2874  *
2875  * @mcs_data: multi-CS internal data
2876  * @mcs_compl: multi-CS completion structure to wait on
2877  * @return 0 on success, otherwise non-zero error code
2878  */
2879 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
2880                                                 struct multi_cs_completion *mcs_compl)
2881 {
2882         long completion_rc;
2883
2884         completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
2885                                                                         mcs_data->timeout_jiffies);
2886
2887         /* update timestamp */
2888         if (completion_rc > 0)
2889                 mcs_data->timestamp = mcs_compl->timestamp;
2890
2891         if (completion_rc == -ERESTARTSYS)
2892                 return completion_rc;
2893
2894         mcs_data->wait_status = completion_rc;
2895
2896         return 0;
2897 }
2898
2899 /*
2900  * hl_multi_cs_completion_init - init array of multi-CS completion structures
2901  *
2902  * @hdev: pointer to habanalabs device structure
2903  */
2904 void hl_multi_cs_completion_init(struct hl_device *hdev)
2905 {
2906         struct multi_cs_completion *mcs_cmpl;
2907         int i;
2908
2909         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2910                 mcs_cmpl = &hdev->multi_cs_completion[i];
2911                 mcs_cmpl->used = 0;
2912                 spin_lock_init(&mcs_cmpl->lock);
2913                 init_completion(&mcs_cmpl->completion);
2914         }
2915 }
2916
2917 /*
2918  * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2919  *
2920  * @hpriv: pointer to the private data of the fd
2921  * @data: pointer to multi-CS wait ioctl in/out args
2922  *
2923  */
2924 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2925 {
2926         struct multi_cs_completion *mcs_compl;
2927         struct hl_device *hdev = hpriv->hdev;
2928         struct multi_cs_data mcs_data = {};
2929         union hl_wait_cs_args *args = data;
2930         struct hl_ctx *ctx = hpriv->ctx;
2931         struct hl_fence **fence_arr;
2932         void __user *seq_arr;
2933         u32 size_to_copy;
2934         u64 *cs_seq_arr;
2935         u8 seq_arr_len;
2936         int rc, i;
2937
2938         for (i = 0 ; i < sizeof(args->in.pad) ; i++)
2939                 if (args->in.pad[i]) {
2940                         dev_dbg(hdev->dev, "Padding bytes must be 0\n");
2941                         return -EINVAL;
2942                 }
2943
2944         if (!hdev->supports_wait_for_multi_cs) {
2945                 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2946                 return -EPERM;
2947         }
2948
2949         seq_arr_len = args->in.seq_arr_len;
2950
2951         if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2952                 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2953                                 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2954                 return -EINVAL;
2955         }
2956
2957         /* allocate memory for sequence array */
2958         cs_seq_arr =
2959                 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2960         if (!cs_seq_arr)
2961                 return -ENOMEM;
2962
2963         /* copy CS sequence array from user */
2964         seq_arr = (void __user *) (uintptr_t) args->in.seq;
2965         size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2966         if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2967                 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2968                 rc = -EFAULT;
2969                 goto free_seq_arr;
2970         }
2971
2972         /* allocate array for the fences */
2973         fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
2974         if (!fence_arr) {
2975                 rc = -ENOMEM;
2976                 goto free_seq_arr;
2977         }
2978
2979         /* initialize the multi-CS internal data */
2980         mcs_data.ctx = ctx;
2981         mcs_data.seq_arr = cs_seq_arr;
2982         mcs_data.fence_arr = fence_arr;
2983         mcs_data.arr_len = seq_arr_len;
2984
2985         hl_ctx_get(ctx);
2986
2987         /* wait (with timeout) for the first CS to be completed */
2988         mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
2989         mcs_compl = hl_wait_multi_cs_completion_init(hdev);
2990         if (IS_ERR(mcs_compl)) {
2991                 rc = PTR_ERR(mcs_compl);
2992                 goto put_ctx;
2993         }
2994
2995         /* poll all CS fences, extract timestamp */
2996         mcs_data.update_ts = true;
2997         rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
2998         /*
2999          * Skip the wait for CS completion when one of the below is true:
3000          * - an error on the poll function
3001          * - one or more CSs in the list completed
3002          * - the user called the ioctl with timeout 0
3003          */
3004         if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
3005                 goto completion_fini;
3006
3007         while (true) {
3008                 rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
3009                 if (rc || (mcs_data.wait_status == 0))
3010                         break;
3011
3012                 /*
3013                  * Poll the fences once again to update the CS map.
3014                  * No timestamp should be updated this time.
3015                  */
3016                 mcs_data.update_ts = false;
3017                 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3018
3019                 if (rc || mcs_data.completion_bitmap)
3020                         break;
3021
3022                 /*
3023                  * If hl_wait_multi_cs_completion returned before the timeout (i.e.
3024                  * it got a completion), it was either completed by a CS in the multi-CS
3025                  * list (in which case the indication is a non-empty completion_bitmap) or
3026                  * by a CS submitted to one of the shared stream masters but not in the
3027                  * multi-CS list (in which case we should wait again, but with the remaining
3028                  * timeout, and set the timestamp to zero to let a CS related to the current
3029                  * multi-CS set a new, relevant, timestamp).
3030                  */
3031                 mcs_data.timeout_jiffies = mcs_data.wait_status;
3032                 mcs_compl->timestamp = 0;
3033         }
3034
3035 completion_fini:
3036         hl_wait_multi_cs_completion_fini(mcs_compl);
3037
3038 put_ctx:
3039         hl_ctx_put(ctx);
3040         kfree(fence_arr);
3041
3042 free_seq_arr:
3043         kfree(cs_seq_arr);
3044
3045         if (rc == -ERESTARTSYS) {
3046                 dev_err_ratelimited(hdev->dev,
3047                                 "user process got signal while waiting for Multi-CS\n");
3048                 rc = -EINTR;
3049         }
3050
3051         if (rc)
3052                 return rc;
3053
3054         /* update output args */
3055         memset(args, 0, sizeof(*args));
3056
3057         if (mcs_data.completion_bitmap) {
3058                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3059                 args->out.cs_completion_map = mcs_data.completion_bitmap;
3060
3061                 /* if the timestamp is not 0, it's valid */
3062                 if (mcs_data.timestamp) {
3063                         args->out.timestamp_nsec = mcs_data.timestamp;
3064                         args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3065                 }
3066
3067                 /* update if some CS was gone */
3068                 if (!mcs_data.timestamp)
3069                         args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3070         } else {
3071                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
3072         }
3073
3074         return 0;
3075 }
3076
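/*
 * hl_cs_wait_ioctl - implementation of the single-CS wait ioctl
 *
 * @hpriv: pointer to the private data of the fd
 * @data: pointer to wait CS ioctl in/out args
 *
 * Waits on the CS whose sequence number is given in the input args and
 * translates the wait result into the output status, flags and timestamp.
 */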
3077 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3078 {
3079         struct hl_device *hdev = hpriv->hdev;
3080         union hl_wait_cs_args *args = data;
3081         enum hl_cs_wait_status status;
3082         u64 seq = args->in.seq;
3083         s64 timestamp;
3084         int rc;
3085
3086         rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
3087
3088         if (rc == -ERESTARTSYS) {
3089                 dev_err_ratelimited(hdev->dev,
3090                         "user process got signal while waiting for CS handle %llu\n",
3091                         seq);
3092                 return -EINTR;
3093         }
3094
3095         memset(args, 0, sizeof(*args));
3096
3097         if (rc) {
3098                 if (rc == -ETIMEDOUT) {
3099                         dev_err_ratelimited(hdev->dev,
3100                                 "CS %llu has timed-out while user process is waiting for it\n",
3101                                 seq);
3102                         args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
3103                 } else if (rc == -EIO) {
3104                         dev_err_ratelimited(hdev->dev,
3105                                 "CS %llu has been aborted while user process is waiting for it\n",
3106                                 seq);
3107                         args->out.status = HL_WAIT_CS_STATUS_ABORTED;
3108                 }
3109                 return rc;
3110         }
3111
3112         if (timestamp) {
3113                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3114                 args->out.timestamp_nsec = timestamp;
3115         }
3116
3117         switch (status) {
3118         case CS_WAIT_STATUS_GONE:
3119                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3120                 fallthrough;
3121         case CS_WAIT_STATUS_COMPLETED:
3122                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3123                 break;
3124         case CS_WAIT_STATUS_BUSY:
3125         default:
3126                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
3127                 break;
3128         }
3129
3130         return 0;
3131 }
3132
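/*
 * ts_buff_get_kernel_ts_record - get a timestamp registration record
 *
 * @buf: pointer to the mapped timestamp buffer
 * @cq_cb: pointer to the CQ counters command buffer
 * @ts_offset: requested record offset inside the timestamp buffer
 * @cq_offset: offset of the monitored counter inside the CQ buffer
 * @target_value: counter value that triggers the timestamp write
 * @wait_list_lock: pointer to the interrupt wait list lock
 * @pend: returned pending-interrupt record at the requested offset
 *
 * @return 0 on success, otherwise non-zero error code
 *
 * If the record at ts_offset is still registered on an interrupt whose target
 * value was not reached yet, it is unregistered here so it can be re-used for
 * the new registration.
 */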
3133 static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
3134                                         struct hl_cb *cq_cb,
3135                                         u64 ts_offset, u64 cq_offset, u64 target_value,
3136                                         spinlock_t *wait_list_lock,
3137                                         struct hl_user_pending_interrupt **pend)
3138 {
3139         struct hl_ts_buff *ts_buff = buf->private;
3140         struct hl_user_pending_interrupt *requested_offset_record =
3141                                 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3142                                 ts_offset;
3143         struct hl_user_pending_interrupt *cb_last =
3144                         (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3145                         (ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
3146         unsigned long flags, iter_counter = 0;
3147         u64 current_cq_counter;
3148
3149         /* Validate that ts_offset does not exceed the last record in the buffer */
3150         if (requested_offset_record >= cb_last) {
3151                 dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset (0x%llx)\n",
3152                                                                 (u64)(uintptr_t)cb_last);
3153                 return -EINVAL;
3154         }
3155
3156 start_over:
3157         spin_lock_irqsave(wait_list_lock, flags);
3158
3159         /* Unregister only if we didn't reach the target value,
3160          * since in this case there will be no handling in irq context,
3161          * and then it's safe to delete the node from the interrupt list
3162          * and re-use it for another interrupt.
3163          */
3164         if (requested_offset_record->ts_reg_info.in_use) {
3165                 current_cq_counter = *requested_offset_record->cq_kernel_addr;
3166                 if (current_cq_counter < requested_offset_record->cq_target_value) {
3167                         list_del(&requested_offset_record->wait_list_node);
3168                         spin_unlock_irqrestore(wait_list_lock, flags);
3169
3170                         hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
3171                         hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
3172
3173                         dev_dbg(buf->mmg->dev,
3174                                 "ts node removed from interrupt list, can now be re-used\n");
3175                 } else {
3176                         dev_dbg(buf->mmg->dev,
3177                                 "ts node in middle of irq handling\n");
3178
3179                         /* irq handling is in progress, give it time to finish */
3180                         spin_unlock_irqrestore(wait_list_lock, flags);
3181                         usleep_range(1, 10);
3182                         if (++iter_counter == MAX_TS_ITER_NUM) {
3183                                 dev_err(buf->mmg->dev,
3184                                         "handling registration interrupt took too long!!\n");
3185                                 return -EINVAL;
3186                         }
3187
3188                         goto start_over;
3189                 }
3190         } else {
3191                 /* Fill up the new registration node info */
3192                 requested_offset_record->ts_reg_info.buf = buf;
3193                 requested_offset_record->ts_reg_info.cq_cb = cq_cb;
3194                 requested_offset_record->ts_reg_info.timestamp_kernel_addr =
3195                                 (u64 *) ts_buff->user_buff_address + ts_offset;
3196                 requested_offset_record->cq_kernel_addr =
3197                                 (u64 *) cq_cb->kernel_address + cq_offset;
3198                 requested_offset_record->cq_target_value = target_value;
3199
3200                 spin_unlock_irqrestore(wait_list_lock, flags);
3201         }
3202
3203         *pend = requested_offset_record;
3204
3205         dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
3206                 requested_offset_record);
3207         return 0;
3208 }
3209
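/*
 * _hl_interrupt_wait_ioctl - wait for a user interrupt, CQ counter based
 *
 * The target value is compared against a counter inside the given CQ counters
 * buffer. When register_ts_record is set, the function does not block: a
 * timestamp record is registered and the timestamp is written once the counter
 * reaches the target value (immediately here if it already did, otherwise
 * later by the interrupt handler). Otherwise, the function blocks for up to
 * timeout_us until completion is signalled.
 */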
3210 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3211                                 struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
3212                                 u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
3213                                 u64 target_value, struct hl_user_interrupt *interrupt,
3214                                 bool register_ts_record, u64 ts_handle, u64 ts_offset,
3215                                 u32 *status, u64 *timestamp)
3216 {
3217         struct hl_user_pending_interrupt *pend;
3218         struct hl_mmap_mem_buf *buf;
3219         struct hl_cb *cq_cb;
3220         unsigned long timeout, flags;
3221         long completion_rc;
3222         int rc = 0;
3223
3224         timeout = hl_usecs64_to_jiffies(timeout_us);
3225
3226         hl_ctx_get(ctx);
3227
3228         cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
3229         if (!cq_cb) {
3230                 rc = -EINVAL;
3231                 goto put_ctx;
3232         }
3233
3234         /* Validate the cq offset */
3235         if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
3236                         ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
3237                 rc = -EINVAL;
3238                 goto put_cq_cb;
3239         }
3240
3241         if (register_ts_record) {
3242                 dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
3243                                         interrupt->interrupt_id, ts_offset, cq_counters_offset);
3244                 buf = hl_mmap_mem_buf_get(mmg, ts_handle);
3245                 if (!buf) {
3246                         rc = -EINVAL;
3247                         goto put_cq_cb;
3248                 }
3249
3250                 /* get ts buffer record */
3251                 rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
3252                                                 cq_counters_offset, target_value,
3253                                                 &interrupt->wait_list_lock, &pend);
3254                 if (rc)
3255                         goto put_ts_buff;
3256         } else {
3257                 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3258                 if (!pend) {
3259                         rc = -ENOMEM;
3260                         goto put_cq_cb;
3261                 }
3262                 hl_fence_init(&pend->fence, ULONG_MAX);
3263                 pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset;
3264                 pend->cq_target_value = target_value;
3265         }
3266
3267         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3268
3269         /* We check for completion value as interrupt could have been received
3270          * before we added the node to the wait list
3271          */
3272         if (*pend->cq_kernel_addr >= target_value) {
3273                 if (register_ts_record)
3274                         pend->ts_reg_info.in_use = 0;
3275                 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3276
3277                 *status = HL_WAIT_CS_STATUS_COMPLETED;
3278
3279                 if (register_ts_record) {
3280                         *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
3281                         goto put_ts_buff;
3282                 } else {
3283                         pend->fence.timestamp = ktime_get();
3284                         goto set_timestamp;
3285                 }
3286         } else if (!timeout_us) {
3287                 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3288                 *status = HL_WAIT_CS_STATUS_BUSY;
3289                 pend->fence.timestamp = ktime_get();
3290                 goto set_timestamp;
3291         }
3292
3293         /* Add the pending user interrupt to the relevant list for the
3294          * interrupt handler to monitor.
3295          * Note that we cannot keep the list sorted by target value (which
3296          * would shorten the list traversal loop), since the same list can
3297          * hold nodes for different cq counter handles.
3298          * Note:
3299          * Mark the ts buff offset as in-use here, inside the spinlock
3300          * protected area, to avoid reaching the re-use path in
3301          * ts_buff_get_kernel_ts_record before adding the node to the list.
3302          * This scenario might happen when multiple threads race on the same
3303          * offset: one thread sets up the ts buff in ts_buff_get_kernel_ts_record,
3304          * then another thread takes over and reaches ts_buff_get_kernel_ts_record,
3305          * and then we would try to re-use the same ts buff offset and delete a
3306          * non-existing node from the list.
3307          */
3308         if (register_ts_record)
3309                 pend->ts_reg_info.in_use = 1;
3310
3311         list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3312         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3313
3314         if (register_ts_record) {
3315                 rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
3316                 goto ts_registration_exit;
3317         }
3318
3319         /* Wait for interrupt handler to signal completion */
3320         completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3321                                                                 timeout);
3322         if (completion_rc > 0) {
3323                 *status = HL_WAIT_CS_STATUS_COMPLETED;
3324         } else {
3325                 if (completion_rc == -ERESTARTSYS) {
3326                         dev_err_ratelimited(hdev->dev,
3327                                         "user process got signal while waiting for interrupt ID %d\n",
3328                                         interrupt->interrupt_id);
3329                         rc = -EINTR;
3330                         *status = HL_WAIT_CS_STATUS_ABORTED;
3331                 } else {
3332                         if (pend->fence.error == -EIO) {
3333                                 dev_err_ratelimited(hdev->dev,
3334                                                 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3335                                                 pend->fence.error);
3336                                 rc = -EIO;
3337                                 *status = HL_WAIT_CS_STATUS_ABORTED;
3338                         } else {
3339                                 /* The wait has timed-out. We don't know anything beyond that
3340                                  * because the workload wasn't submitted through the driver.
3341                                  * Therefore, from driver's perspective, the workload is still
3342                                  * executing.
3343                                  */
3344                                 rc = 0;
3345                                 *status = HL_WAIT_CS_STATUS_BUSY;
3346                         }
3347                 }
3348         }
3349
3350         /*
3351          * We keep removing the node from the list here, and not in the irq
3352          * handler, for the completion timeout case. If it's a registration
3353          * for a ts record, the node will be deleted in the irq handler after
3354          * we reach the target value.
3355          */
3356         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3357         list_del(&pend->wait_list_node);
3358         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3359
3360 set_timestamp:
3361         *timestamp = ktime_to_ns(pend->fence.timestamp);
3362         kfree(pend);
3363         hl_cb_put(cq_cb);
3364 ts_registration_exit:
3365         hl_ctx_put(ctx);
3366
3367         return rc;
3368
3369 put_ts_buff:
3370         hl_mmap_mem_buf_put(buf);
3371 put_cq_cb:
3372         hl_cb_put(cq_cb);
3373 put_ctx:
3374         hl_ctx_put(ctx);
3375
3376         return rc;
3377 }
3378
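/*
 * _hl_interrupt_wait_ioctl_user_addr - wait for a user interrupt, user address based
 *
 * The target value is compared against a 64-bit value read from a user-space
 * address each time the interrupt is received. The wait is repeated with the
 * remaining timeout for as long as the value has not reached the target and
 * no error was reported on the fence.
 */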
3379 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
3380                                 u64 timeout_us, u64 user_address,
3381                                 u64 target_value, struct hl_user_interrupt *interrupt,
3382                                 u32 *status,
3383                                 u64 *timestamp)
3384 {
3385         struct hl_user_pending_interrupt *pend;
3386         unsigned long timeout, flags;
3387         u64 completion_value;
3388         long completion_rc;
3389         int rc = 0;
3390
3391         timeout = hl_usecs64_to_jiffies(timeout_us);
3392
3393         hl_ctx_get(ctx);
3394
3395         pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3396         if (!pend) {
3397                 hl_ctx_put(ctx);
3398                 return -ENOMEM;
3399         }
3400
3401         hl_fence_init(&pend->fence, ULONG_MAX);
3402
3403         /* Add pending user interrupt to relevant list for the interrupt
3404          * handler to monitor
3405          */
3406         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3407         list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3408         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3409
3410         /* We check for completion value as interrupt could have been received
3411          * before we added the node to the wait list
3412          */
3413         if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3414                 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3415                 rc = -EFAULT;
3416                 goto remove_pending_user_interrupt;
3417         }
3418
3419         if (completion_value >= target_value) {
3420                 *status = HL_WAIT_CS_STATUS_COMPLETED;
3421                 /* There was no interrupt, we assume the completion is now. */
3422                 pend->fence.timestamp = ktime_get();
3423         } else {
3424                 *status = HL_WAIT_CS_STATUS_BUSY;
3425         }
3426
3427         if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
3428                 goto remove_pending_user_interrupt;
3429
3430 wait_again:
3431         /* Wait for interrupt handler to signal completion */
3432         completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3433                                                                                 timeout);
3434
3435         /* If the timeout did not expire we need to perform the comparison.
3436          * If the comparison fails, keep waiting until the timeout expires.
3437          */
3438         if (completion_rc > 0) {
3439                 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3440                 /* reinit_completion must be called before we check the user
3441                  * completion value, otherwise, if an interrupt is received after
3442                  * the comparison and before the next wait_for_completion,
3443                  * we will reach the timeout and fail.
3444                  */
3445                 reinit_completion(&pend->fence.completion);
3446                 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3447
3448                 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3449                         dev_err(hdev->dev, "Failed to copy completion value from user\n");
3450                         rc = -EFAULT;
3451
3452                         goto remove_pending_user_interrupt;
3453                 }
3454
3455                 if (completion_value >= target_value) {
3456                         *status = HL_WAIT_CS_STATUS_COMPLETED;
3457                 } else if (pend->fence.error) {
3458                         dev_err_ratelimited(hdev->dev,
3459                                 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3460                                 pend->fence.error);
3461                         /* set the command completion status as ABORTED */
3462                         *status = HL_WAIT_CS_STATUS_ABORTED;
3463                 } else {
3464                         timeout = completion_rc;
3465                         goto wait_again;
3466                 }
3467         } else if (completion_rc == -ERESTARTSYS) {
3468                 dev_err_ratelimited(hdev->dev,
3469                         "user process got signal while waiting for interrupt ID %d\n",
3470                         interrupt->interrupt_id);
3471                 rc = -EINTR;
3472         } else {
3473                 /* The wait has timed-out. We don't know anything beyond that
3474                  * because the workload wasn't submitted through the driver.
3475                  * Therefore, from driver's perspective, the workload is still
3476                  * executing.
3477                  */
3478                 rc = 0;
3479                 *status = HL_WAIT_CS_STATUS_BUSY;
3480         }
3481
3482 remove_pending_user_interrupt:
3483         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3484         list_del(&pend->wait_list_node);
3485         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3486
3487         *timestamp = ktime_to_ns(pend->fence.timestamp);
3488
3489         kfree(pend);
3490         hl_ctx_put(ctx);
3491
3492         return rc;
3493 }
3494
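/*
 * hl_interrupt_wait_ioctl - implementation of the interrupt wait ioctl
 *
 * @hpriv: pointer to the private data of the fd
 * @data: pointer to wait CS ioctl in/out args
 *
 * Resolves the requested interrupt (decoder, user or common interrupt) and
 * dispatches to either the CQ counter based wait or the user address based
 * wait, depending on the HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ flag.
 */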
3495 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3496 {
3497         u16 interrupt_id, first_interrupt, last_interrupt;
3498         struct hl_device *hdev = hpriv->hdev;
3499         struct asic_fixed_properties *prop;
3500         struct hl_user_interrupt *interrupt;
3501         union hl_wait_cs_args *args = data;
3502         u32 status = HL_WAIT_CS_STATUS_BUSY;
3503         u64 timestamp = 0;
3504         int rc, int_idx;
3505
3506         prop = &hdev->asic_prop;
3507
3508         if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
3509                 dev_err(hdev->dev, "no user interrupts allowed");
3510                 return -EPERM;
3511         }
3512
3513         interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
3514
3515         first_interrupt = prop->first_available_user_interrupt;
3516         last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
3517
3518         if (interrupt_id < prop->user_dec_intr_count) {
3519
3520                 /* Check if the requested core is enabled */
3521                 if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
3522                         dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
3523                                 interrupt_id);
3524                         return -EINVAL;
3525                 }
3526
3527                 interrupt = &hdev->user_interrupt[interrupt_id];
3528
3529         } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
3530
3531                 int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
3532                 interrupt = &hdev->user_interrupt[int_idx];
3533
3534         } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
3535                 interrupt = &hdev->common_user_cq_interrupt;
3536         } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
3537                 interrupt = &hdev->common_decoder_interrupt;
3538         } else {
3539                 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
3540                 return -EINVAL;
3541         }
3542
3543         if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
3544                 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
3545                                 args->in.interrupt_timeout_us, args->in.cq_counters_handle,
3546                                 args->in.cq_counters_offset,
3547                                 args->in.target, interrupt,
3548                                 !!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT),
3549                                 args->in.timestamp_handle, args->in.timestamp_offset,
3550                                 &status, &timestamp);
3551         else
3552                 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
3553                                 args->in.interrupt_timeout_us, args->in.addr,
3554                                 args->in.target, interrupt, &status,
3555                                 &timestamp);
3556         if (rc)
3557                 return rc;
3558
3559         memset(args, 0, sizeof(*args));
3560         args->out.status = status;
3561
3562         if (timestamp) {
3563                 args->out.timestamp_nsec = timestamp;
3564                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3565         }
3566
3567         return 0;
3568 }
3569
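/*
 * hl_wait_ioctl - main entry point of the wait ioctl
 *
 * @hpriv: pointer to the private data of the fd
 * @data: pointer to wait CS ioctl in/out args
 *
 * Dispatches to the interrupt wait, multi-CS wait or single-CS wait handler,
 * according to the flags in the ioctl input arguments.
 */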
3570 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3571 {
3572         struct hl_device *hdev = hpriv->hdev;
3573         union hl_wait_cs_args *args = data;
3574         u32 flags = args->in.flags;
3575         int rc;
3576
3577         /* If the device is not operational, or if an error has happened and the user should
3578          * release the device, there is no point in waiting for any command submission or user interrupt.
3579          */
3580         if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
3581                 return -EBUSY;
3582
3583         if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
3584                 rc = hl_interrupt_wait_ioctl(hpriv, data);
3585         else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
3586                 rc = hl_multi_cs_wait_ioctl(hpriv, data);
3587         else
3588                 rc = hl_cs_wait_ioctl(hpriv, data);
3589
3590         return rc;
3591 }