linux-2.6-microblaze.git: drivers/accel/habanalabs/common/command_submission.c (commit d89b539ee5b9444558c1018061be112f3359141d)
1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2021 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/drm/habanalabs_accel.h>
9 #include "habanalabs.h"
10
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13
14 #define HL_CS_FLAGS_TYPE_MASK   (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15                         HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
16                         HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
17                         HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
18
19
20 #define MAX_TS_ITER_NUM 100
21
22 /**
23  * enum hl_cs_wait_status - cs wait status
24  * @CS_WAIT_STATUS_BUSY: cs was not completed yet
25  * @CS_WAIT_STATUS_COMPLETED: cs completed
26  * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
27  */
28 enum hl_cs_wait_status {
29         CS_WAIT_STATUS_BUSY,
30         CS_WAIT_STATUS_COMPLETED,
31         CS_WAIT_STATUS_GONE
32 };
33
34 static void job_wq_completion(struct work_struct *work);
35 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
36                                 enum hl_cs_wait_status *status, s64 *timestamp);
37 static void cs_do_release(struct kref *ref);
38
39 static void hl_push_cs_outcome(struct hl_device *hdev,
40                                struct hl_cs_outcome_store *outcome_store,
41                                u64 seq, ktime_t ts, int error)
42 {
43         struct hl_cs_outcome *node;
44         unsigned long flags;
45
46         /*
47          * CS outcome store supports the following operations:
48          * push outcome - store a recent CS outcome in the store
49          * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
50          * It uses 2 lists: used list and free list.
51          * It has a pre-allocated amount of nodes, each node stores
52          * a single CS outcome.
53          * Initially, all the nodes are in the free list.
54          * On push outcome, a node (any) is taken from the free list, its
55          * information is filled in, and the node is moved to the used list.
56          * It is possible that there are no nodes left in the free list.
57          * In this case, we will lose some information about old outcomes. We
58          * will pop the OLDEST node from the used list, and make it free.
59          * On pop, the node is searched for in the used list (using a search
60          * index).
61          * If found, the node is then removed from the used list, and moved
62          * back to the free list. The outcome data that the node contained is
63          * returned back to the user.
64          */
65
66         spin_lock_irqsave(&outcome_store->db_lock, flags);
67
68         if (list_empty(&outcome_store->free_list)) {
69                 node = list_last_entry(&outcome_store->used_list,
70                                        struct hl_cs_outcome, list_link);
71                 hash_del(&node->map_link);
72                 dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
73         } else {
74                 node = list_last_entry(&outcome_store->free_list,
75                                        struct hl_cs_outcome, list_link);
76         }
77
78         list_del_init(&node->list_link);
79
80         node->seq = seq;
81         node->ts = ts;
82         node->error = error;
83
84         list_add(&node->list_link, &outcome_store->used_list);
85         hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
86
87         spin_unlock_irqrestore(&outcome_store->db_lock, flags);
88 }
89
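/*
 * hl_pop_cs_outcome - retrieve (and remove) the stored outcome of a specific CS
 *
 * Looks up @seq in the outcome map; on a hit it copies the stored timestamp and
 * error to the caller, moves the node back to the free list and returns true.
 * Returns false if no outcome for @seq is stored, either because it was never
 * pushed or because it was evicted as the oldest entry when the free list ran
 * out (see the description in hl_push_cs_outcome() above).
 */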
90 static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
91                                u64 seq, ktime_t *ts, int *error)
92 {
93         struct hl_cs_outcome *node;
94         unsigned long flags;
95
96         spin_lock_irqsave(&outcome_store->db_lock, flags);
97
98         hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
99                 if (node->seq == seq) {
100                         *ts = node->ts;
101                         *error = node->error;
102
103                         hash_del(&node->map_link);
104                         list_del_init(&node->list_link);
105                         list_add(&node->list_link, &outcome_store->free_list);
106
107                         spin_unlock_irqrestore(&outcome_store->db_lock, flags);
108
109                         return true;
110                 }
111
112         spin_unlock_irqrestore(&outcome_store->db_lock, flags);
113
114         return false;
115 }
116
117 static void hl_sob_reset(struct kref *ref)
118 {
119         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
120                                                         kref);
121         struct hl_device *hdev = hw_sob->hdev;
122
123         dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
124
125         hdev->asic_funcs->reset_sob(hdev, hw_sob);
126
127         hw_sob->need_reset = false;
128 }
129
130 void hl_sob_reset_error(struct kref *ref)
131 {
132         struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
133                                                         kref);
134         struct hl_device *hdev = hw_sob->hdev;
135
136         dev_crit(hdev->dev,
137                 "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
138                 hw_sob->q_idx, hw_sob->sob_id);
139 }
140
141 void hw_sob_put(struct hl_hw_sob *hw_sob)
142 {
143         if (hw_sob)
144                 kref_put(&hw_sob->kref, hl_sob_reset);
145 }
146
147 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
148 {
149         if (hw_sob)
150                 kref_put(&hw_sob->kref, hl_sob_reset_error);
151 }
152
153 void hw_sob_get(struct hl_hw_sob *hw_sob)
154 {
155         if (hw_sob)
156                 kref_get(&hw_sob->kref);
157 }
158
159 /**
160  * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
161  * @sob_base: sob base id
162  * @sob_mask: sob user mask, each bit represents a sob offset from sob base
163  * @mask: generated mask
164  *
165  * Return: 0 if given parameters are valid
166  */
167 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
168 {
169         int i;
170
171         if (sob_mask == 0)
172                 return -EINVAL;
173
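        /*
         * The generated monitor mask appears to be active-low: cleared bits
         * select the SOBs to watch. Illustrative example: sob_base = 10
         * (offset 2 within its 8-SOB window) with sob_mask = 0x1 yields
         * *mask = ~(1 << 2) = 0xfb, i.e. only that single SOB is selected.
         */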
174         if (sob_mask == 0x1) {
175                 *mask = ~(1 << (sob_base & 0x7));
176         } else {
177                 /* find msb in order to verify sob range is valid */
178                 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
179                         if (BIT(i) & sob_mask)
180                                 break;
181
182                 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
183                         return -EINVAL;
184
185                 *mask = ~sob_mask;
186         }
187
188         return 0;
189 }
190
191 static void hl_fence_release(struct kref *kref)
192 {
193         struct hl_fence *fence =
194                 container_of(kref, struct hl_fence, refcount);
195         struct hl_cs_compl *hl_cs_cmpl =
196                 container_of(fence, struct hl_cs_compl, base_fence);
197
198         kfree(hl_cs_cmpl);
199 }
200
201 void hl_fence_put(struct hl_fence *fence)
202 {
203         if (IS_ERR_OR_NULL(fence))
204                 return;
205         kref_put(&fence->refcount, hl_fence_release);
206 }
207
208 void hl_fences_put(struct hl_fence **fence, int len)
209 {
210         int i;
211
212         for (i = 0; i < len; i++, fence++)
213                 hl_fence_put(*fence);
214 }
215
216 void hl_fence_get(struct hl_fence *fence)
217 {
218         if (fence)
219                 kref_get(&fence->refcount);
220 }
221
222 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
223 {
224         kref_init(&fence->refcount);
225         fence->cs_sequence = sequence;
226         fence->error = 0;
227         fence->timestamp = ktime_set(0, 0);
228         fence->mcs_handling_done = false;
229         init_completion(&fence->completion);
230 }
231
232 void cs_get(struct hl_cs *cs)
233 {
234         kref_get(&cs->refcount);
235 }
236
237 static int cs_get_unless_zero(struct hl_cs *cs)
238 {
239         return kref_get_unless_zero(&cs->refcount);
240 }
241
242 static void cs_put(struct hl_cs *cs)
243 {
244         kref_put(&cs->refcount, cs_do_release);
245 }
246
247 static void cs_job_do_release(struct kref *ref)
248 {
249         struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
250
251         kfree(job);
252 }
253
254 static void hl_cs_job_put(struct hl_cs_job *job)
255 {
256         kref_put(&job->refcount, cs_job_do_release);
257 }
258
259 bool cs_needs_completion(struct hl_cs *cs)
260 {
261         /* In case this is a staged CS, only the last CS in sequence should
262          * get a completion; any non-staged CS will always get a completion.
263          */
264         if (cs->staged_cs && !cs->staged_last)
265                 return false;
266
267         return true;
268 }
269
270 bool cs_needs_timeout(struct hl_cs *cs)
271 {
272         /* In case this is a staged CS, only the first CS in sequence should
273          * get a timeout; any non-staged CS will always get a timeout.
274          */
275         if (cs->staged_cs && !cs->staged_first)
276                 return false;
277
278         return true;
279 }
280
281 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
282 {
283         /*
284          * Patched CB is created for external queue jobs, and for H/W queue
285          * jobs if the user CB was allocated by the driver and the MMU is disabled.
286          */
287         return (job->queue_type == QUEUE_TYPE_EXT ||
288                         (job->queue_type == QUEUE_TYPE_HW &&
289                                         job->is_kernel_allocated_cb &&
290                                         !hdev->mmu_enable));
291 }
292
293 /*
294  * cs_parser - parse the user command submission
295  *
296  * @hpriv: pointer to the private data of the fd
297  * @job: pointer to the job that holds the command submission info
298  *
299  * The function parses the command submission of the user. It calls the
300  * ASIC specific parser, which returns a list of memory blocks to send
301  * to the device as different command buffers
302  *
303  */
304 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
305 {
306         struct hl_device *hdev = hpriv->hdev;
307         struct hl_cs_parser parser;
308         int rc;
309
310         parser.ctx_id = job->cs->ctx->asid;
311         parser.cs_sequence = job->cs->sequence;
312         parser.job_id = job->id;
313
314         parser.hw_queue_id = job->hw_queue_id;
315         parser.job_userptr_list = &job->userptr_list;
316         parser.patched_cb = NULL;
317         parser.user_cb = job->user_cb;
318         parser.user_cb_size = job->user_cb_size;
319         parser.queue_type = job->queue_type;
320         parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
321         job->patched_cb = NULL;
322         parser.completion = cs_needs_completion(job->cs);
323
324         rc = hdev->asic_funcs->cs_parser(hdev, &parser);
325
326         if (is_cb_patched(hdev, job)) {
327                 if (!rc) {
328                         job->patched_cb = parser.patched_cb;
329                         job->job_cb_size = parser.patched_cb_size;
330                         job->contains_dma_pkt = parser.contains_dma_pkt;
331                         atomic_inc(&job->patched_cb->cs_cnt);
332                 }
333
334                 /*
335                  * Whether the parsing worked or not, we don't need the
336                  * original CB anymore because it was already parsed and
337                  * won't be accessed again for this CS
338                  */
339                 atomic_dec(&job->user_cb->cs_cnt);
340                 hl_cb_put(job->user_cb);
341                 job->user_cb = NULL;
342         } else if (!rc) {
343                 job->job_cb_size = job->user_cb_size;
344         }
345
346         return rc;
347 }
348
349 static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
350 {
351         struct hl_cs *cs = job->cs;
352
353         if (is_cb_patched(hdev, job)) {
354                 hl_userptr_delete_list(hdev, &job->userptr_list);
355
356                 /*
357                  * We might arrive here from rollback and patched CB wasn't
358                  * created, so we need to check it's not NULL
359                  */
360                 if (job->patched_cb) {
361                         atomic_dec(&job->patched_cb->cs_cnt);
362                         hl_cb_put(job->patched_cb);
363                 }
364         }
365
366         /* For H/W queue jobs, if a user CB was allocated by the driver and the
367          * MMU is enabled, the user CB isn't released in cs_parser() and thus
368          * should be released here. This is also true for INT queue jobs which
369          * were allocated by the driver.
370          */
371         if ((job->is_kernel_allocated_cb &&
372                 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
373                                 job->queue_type == QUEUE_TYPE_INT))) {
374                 atomic_dec(&job->user_cb->cs_cnt);
375                 hl_cb_put(job->user_cb);
376         }
377
378         /*
379          * This is the only place where there can be multiple threads
380          * modifying the list at the same time
381          */
382         spin_lock(&cs->job_lock);
383         list_del(&job->cs_node);
384         spin_unlock(&cs->job_lock);
385
386         hl_debugfs_remove_job(hdev, job);
387
388         /* We decrement reference only for a CS that gets completion
389          * because the reference was incremented only for this kind of CS
390          * right before it was scheduled.
391          *
392          * In staged submission, only the last CS marked as 'staged_last'
393          * gets completion, hence its release function will be called from here.
394          * As for all the other CSs in the staged submission which do not get
395          * completion, their CS reference will be decremented by the
396          * 'staged_last' CS during the CS release flow.
397          * All relevant PQ CI counters will be incremented during the CS release
398          * flow by calling 'hl_hw_queue_update_ci'.
399          */
400         if (cs_needs_completion(cs) &&
401                         (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
402
403                 /* In CS based completions, the timestamp is already available,
404                  * so no need to extract it from job
405                  */
406                 if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
407                         cs->completion_timestamp = job->timestamp;
408
409                 cs_put(cs);
410         }
411
412         hl_cs_job_put(job);
413 }
414
415 /*
416  * hl_staged_cs_find_first - locate the first CS in this staged submission
417  *
418  * @hdev: pointer to device structure
419  * @cs_seq: staged submission sequence number
420  *
421  * @note: This function must be called under 'hdev->cs_mirror_lock'
422  *
423  * Find and return a CS pointer with the given sequence
424  */
425 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
426 {
427         struct hl_cs *cs;
428
429         list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
430                 if (cs->staged_cs && cs->staged_first &&
431                                 cs->sequence == cs_seq)
432                         return cs;
433
434         return NULL;
435 }
436
437 /*
438  * is_staged_cs_last_exists - returns true if the last CS in sequence exists
439  *
440  * @hdev: pointer to device structure
441  * @cs: staged submission member
442  *
443  */
444 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
445 {
446         struct hl_cs *last_entry;
447
448         last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
449                                                                 staged_cs_node);
450
451         if (last_entry->staged_last)
452                 return true;
453
454         return false;
455 }
456
457 /*
458  * staged_cs_get - get CS reference if this CS is a part of a staged CS
459  *
460  * @hdev: pointer to device structure
461  * @cs: current CS
463  *
464  * Increment CS reference for every CS in this staged submission except for
465  * the CS which gets completion.
466  */
467 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
468 {
469         /* Only the last CS in this staged submission will get a completion.
470          * We must increment the reference for all other CS's in this
471          * staged submission.
472          * Once we get a completion we will release the whole staged submission.
473          */
474         if (!cs->staged_last)
475                 cs_get(cs);
476 }
477
478 /*
479  * staged_cs_put - put a CS in case it is part of staged submission
480  *
481  * @hdev: pointer to device structure
482  * @cs: CS to put
483  *
484  * This function decrements a CS reference (for a non completion CS)
485  */
486 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
487 {
488         /* We release all CSs in a staged submission except the last
489          * CS, whose reference we never incremented.
490          */
491         if (!cs_needs_completion(cs))
492                 cs_put(cs);
493 }
494
495 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
496 {
497         struct hl_cs *next = NULL, *iter, *first_cs;
498
499         if (!cs_needs_timeout(cs))
500                 return;
501
502         spin_lock(&hdev->cs_mirror_lock);
503
504         /* We need to handle tdr only once for the complete staged submission.
505          * Hence, we choose the CS that reaches this function first which is
506          * the CS marked as 'staged_last'.
507          * In case a single staged CS was submitted which has both first and last
508          * indications, then "cs_find_first" below will return NULL, since we
509          * removed the cs node from the list before getting here;
510          * in such a case just continue with the cs to cancel its TDR work.
511          */
512         if (cs->staged_cs && cs->staged_last) {
513                 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
514                 if (first_cs)
515                         cs = first_cs;
516         }
517
518         spin_unlock(&hdev->cs_mirror_lock);
519
520         /* Don't cancel TDR in case this CS timed out because we might be
521          * running from the TDR context
522          */
523         if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
524                 return;
525
526         if (cs->tdr_active)
527                 cancel_delayed_work_sync(&cs->work_tdr);
528
529         spin_lock(&hdev->cs_mirror_lock);
530
531         /* queue TDR for next CS */
532         list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
533                 if (cs_needs_timeout(iter)) {
534                         next = iter;
535                         break;
536                 }
537
538         if (next && !next->tdr_active) {
539                 next->tdr_active = true;
540                 schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
541         }
542
543         spin_unlock(&hdev->cs_mirror_lock);
544 }
545
546 /*
547  * force_complete_multi_cs - complete all contexts that wait on multi-CS
548  *
549  * @hdev: pointer to habanalabs device structure
550  */
551 static void force_complete_multi_cs(struct hl_device *hdev)
552 {
553         int i;
554
555         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
556                 struct multi_cs_completion *mcs_compl;
557
558                 mcs_compl = &hdev->multi_cs_completion[i];
559
560                 spin_lock(&mcs_compl->lock);
561
562                 if (!mcs_compl->used) {
563                         spin_unlock(&mcs_compl->lock);
564                         continue;
565                 }
566
567                 /* When calling force complete, no context should be waiting on
568                  * multi-CS.
569                  * We call the function as a protection for such a case, to
570                  * free any pending context and print an error message.
571                  */
572                 dev_err(hdev->dev,
573                                 "multi-CS completion context %d still waiting when calling force completion\n",
574                                 i);
575                 complete_all(&mcs_compl->completion);
576                 spin_unlock(&mcs_compl->lock);
577         }
578 }
579
580 /*
581  * complete_multi_cs - complete all waiting entities on multi-CS
582  *
583  * @hdev: pointer to habanalabs device structure
584  * @cs: CS structure
585  * The function signals a waiting entity that has overlapping stream masters
586  * with the completed CS.
587  * For example:
588  * - a completed CS worked on stream master QID 4, multi CS completion
589  *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
590  *   common stream master QID
591  * - a completed CS worked on stream master QID 4, multi CS completion
592  *   is actively waiting on stream master QIDs 3, 4. send signal as stream
593  *   master QID 4 is common
594  */
595 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
596 {
597         struct hl_fence *fence = cs->fence;
598         int i;
599
600         /* in case of multi CS check for completion only for the first CS */
601         if (cs->staged_cs && !cs->staged_first)
602                 return;
603
604         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
605                 struct multi_cs_completion *mcs_compl;
606
607                 mcs_compl = &hdev->multi_cs_completion[i];
608                 if (!mcs_compl->used)
609                         continue;
610
611                 spin_lock(&mcs_compl->lock);
612
613                 /*
614                  * complete if:
615                  * 1. still waiting for completion
616                  * 2. the completed CS has at least one overlapping stream
617                  *    master with the stream masters in the completion
618                  */
619                 if (mcs_compl->used &&
620                                 (fence->stream_master_qid_map &
621                                         mcs_compl->stream_master_qid_map)) {
622                         /* extract the timestamp only of first completed CS */
623                         if (!mcs_compl->timestamp)
624                                 mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
625
626                         complete_all(&mcs_compl->completion);
627
628                         /*
629                          * Setting mcs_handling_done inside the lock ensures
630                          * at least one fence has mcs_handling_done set to
631                          * true before the wait for mcs finishes. This ensures at
632                          * least one CS will be set as completed when polling
633                          * mcs fences.
634                          */
635                         fence->mcs_handling_done = true;
636                 }
637
638                 spin_unlock(&mcs_compl->lock);
639         }
640         /* In case CS completed without mcs completion initialized */
641         fence->mcs_handling_done = true;
642 }
643
644 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
645                                         struct hl_cs *cs,
646                                         struct hl_cs_compl *hl_cs_cmpl)
647 {
648         /* Skip this handler if the cs wasn't submitted, to avoid putting
649          * the hw_sob twice, since this case was already handled at this point.
650          * Also skip if the hw_sob pointer wasn't set.
651          */
652         if (!hl_cs_cmpl->hw_sob || !cs->submitted)
653                 return;
654
655         spin_lock(&hl_cs_cmpl->lock);
656
657         /*
658          * We get a refcount upon reservation of signals or signal/wait cs for the
659          * hw_sob object, and need to put it when the first staged cs
660          * (which contains the encaps signals) or cs signal/wait is completed.
661          */
662         if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
663                         (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
664                         (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
665                         (!!hl_cs_cmpl->encaps_signals)) {
666                 dev_dbg(hdev->dev,
667                                 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
668                                 hl_cs_cmpl->cs_seq,
669                                 hl_cs_cmpl->type,
670                                 hl_cs_cmpl->hw_sob->sob_id,
671                                 hl_cs_cmpl->sob_val);
672
673                 hw_sob_put(hl_cs_cmpl->hw_sob);
674
675                 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
676                         hdev->asic_funcs->reset_sob_group(hdev,
677                                         hl_cs_cmpl->sob_group);
678         }
679
680         spin_unlock(&hl_cs_cmpl->lock);
681 }
682
683 static void cs_do_release(struct kref *ref)
684 {
685         struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
686         struct hl_device *hdev = cs->ctx->hdev;
687         struct hl_cs_job *job, *tmp;
688         struct hl_cs_compl *hl_cs_cmpl =
689                         container_of(cs->fence, struct hl_cs_compl, base_fence);
690
691         cs->completed = true;
692
693         /*
694          * Although reaching here means that all external jobs have
695          * finished (because each one of them took a refcnt on the CS), we still
696          * need to go over the internal jobs and complete them. Otherwise, we
697          * will have leaked memory and what's worse, the CS object (and
698          * potentially the CTX object) could be released, while the JOB
699          * still holds a pointer to them (but no reference).
700          */
701         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
702                 hl_complete_job(hdev, job);
703
704         if (!cs->submitted) {
705                 /*
706                  * In case the wait for signal CS was submitted, the fence put
707                  * occurs in init_signal_wait_cs() or collective_wait_init_cs()
708                  * right before hanging on the PQ.
709                  */
710                 if (cs->type == CS_TYPE_WAIT ||
711                                 cs->type == CS_TYPE_COLLECTIVE_WAIT)
712                         hl_fence_put(cs->signal_fence);
713
714                 goto out;
715         }
716
717         /* Need to update CI for all queue jobs that do not get completion */
718         hl_hw_queue_update_ci(cs);
719
720         /* remove CS from CS mirror list */
721         spin_lock(&hdev->cs_mirror_lock);
722         list_del_init(&cs->mirror_node);
723         spin_unlock(&hdev->cs_mirror_lock);
724
725         cs_handle_tdr(hdev, cs);
726
727         if (cs->staged_cs) {
728                 /* the completion CS decrements reference for the entire
729                  * staged submission
730                  */
731                 if (cs->staged_last) {
732                         struct hl_cs *staged_cs, *tmp_cs;
733
734                         list_for_each_entry_safe(staged_cs, tmp_cs,
735                                         &cs->staged_cs_node, staged_cs_node)
736                                 staged_cs_put(hdev, staged_cs);
737                 }
738
739                 /* A staged CS will be a member in the list only after it
740                  * was submitted. We used 'cs_mirror_lock' when inserting
741                  * it into the list, so we will use it again when removing it.
742                  */
743                 if (cs->submitted) {
744                         spin_lock(&hdev->cs_mirror_lock);
745                         list_del(&cs->staged_cs_node);
746                         spin_unlock(&hdev->cs_mirror_lock);
747                 }
748
749                 /* decrement refcount to handle when first staged cs
750                  * with encaps signals is completed.
751                  */
752                 if (hl_cs_cmpl->encaps_signals)
753                         kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
754                                         hl_encaps_release_handle_and_put_ctx);
755         }
756
757         if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
758                 kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
759
760 out:
761         /* Must be called before hl_ctx_put because inside we use ctx to get
762          * the device
763          */
764         hl_debugfs_remove_cs(cs);
765
766         hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
767
768         /* We need to mark an error for a non-submitted CS because in that case
769          * the hl fence release flow is different. Mainly, we don't need
770          * to handle hw_sob for signal/wait
771          */
772         if (cs->timedout)
773                 cs->fence->error = -ETIMEDOUT;
774         else if (cs->aborted)
775                 cs->fence->error = -EIO;
776         else if (!cs->submitted)
777                 cs->fence->error = -EBUSY;
778
779         if (unlikely(cs->skip_reset_on_timeout)) {
780                 dev_err(hdev->dev,
781                         "Command submission %llu completed after %llu (s)\n",
782                         cs->sequence,
783                         div_u64(jiffies - cs->submission_time_jiffies, HZ));
784         }
785
786         if (cs->timestamp) {
787                 cs->fence->timestamp = cs->completion_timestamp;
788                 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
789                                    cs->fence->timestamp, cs->fence->error);
790         }
791
792         hl_ctx_put(cs->ctx);
793
794         complete_all(&cs->fence->completion);
795         complete_multi_cs(hdev, cs);
796
797         cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
798
799         hl_fence_put(cs->fence);
800
801         kfree(cs->jobs_in_queue_cnt);
802         kfree(cs);
803 }
804
805 static void cs_timedout(struct work_struct *work)
806 {
807         struct hl_device *hdev;
808         u64 event_mask = 0x0;
809         int rc;
810         struct hl_cs *cs = container_of(work, struct hl_cs,
811                                                  work_tdr.work);
812         bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false;
813
814         rc = cs_get_unless_zero(cs);
815         if (!rc)
816                 return;
817
818         if ((!cs->submitted) || (cs->completed)) {
819                 cs_put(cs);
820                 return;
821         }
822
823         hdev = cs->ctx->hdev;
824
825         if (likely(!skip_reset_on_timeout)) {
826                 if (hdev->reset_on_lockup)
827                         device_reset = true;
828                 else
829                         hdev->reset_info.needs_reset = true;
830
831                 /* Mark the CS as timed out so we won't try to cancel its TDR */
832                 cs->timedout = true;
833         }
834
835         /* Save only the first CS timeout parameters */
836         rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
837         if (rc) {
838                 hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
839                 hdev->captured_err_info.cs_timeout.seq = cs->sequence;
840                 event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
841         }
842
843         switch (cs->type) {
844         case CS_TYPE_SIGNAL:
845                 dev_err(hdev->dev,
846                         "Signal command submission %llu has not finished in time!\n",
847                         cs->sequence);
848                 break;
849
850         case CS_TYPE_WAIT:
851                 dev_err(hdev->dev,
852                         "Wait command submission %llu has not finished in time!\n",
853                         cs->sequence);
854                 break;
855
856         case CS_TYPE_COLLECTIVE_WAIT:
857                 dev_err(hdev->dev,
858                         "Collective Wait command submission %llu has not finished in time!\n",
859                         cs->sequence);
860                 break;
861
862         default:
863                 dev_err(hdev->dev,
864                         "Command submission %llu has not finished in time!\n",
865                         cs->sequence);
866                 break;
867         }
868
869         rc = hl_state_dump(hdev);
870         if (rc)
871                 dev_err(hdev->dev, "Error during system state dump %d\n", rc);
872
873         cs_put(cs);
874
875         if (device_reset) {
876                 event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
877                 hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
878         } else if (event_mask) {
879                 hl_notifier_event_send_all(hdev, event_mask);
880         }
881 }
882
883 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
884                         enum hl_cs_type cs_type, u64 user_sequence,
885                         struct hl_cs **cs_new, u32 flags, u32 timeout)
886 {
887         struct hl_cs_counters_atomic *cntr;
888         struct hl_fence *other = NULL;
889         struct hl_cs_compl *cs_cmpl;
890         struct hl_cs *cs;
891         int rc;
892
893         cntr = &hdev->aggregated_cs_counters;
894
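        /*
         * The allocations below are tried with GFP_ATOMIC first and fall back
         * to GFP_KERNEL (which may sleep) only if the atomic attempt fails;
         * presumably this keeps the common submission path from blocking.
         */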
895         cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
896         if (!cs)
897                 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
898
899         if (!cs) {
900                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
901                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
902                 return -ENOMEM;
903         }
904
905         /* increment refcnt for context */
906         hl_ctx_get(ctx);
907
908         cs->ctx = ctx;
909         cs->submitted = false;
910         cs->completed = false;
911         cs->type = cs_type;
912         cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
913         cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
914         cs->timeout_jiffies = timeout;
915         cs->skip_reset_on_timeout =
916                 hdev->reset_info.skip_reset_on_timeout ||
917                 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
918         cs->submission_time_jiffies = jiffies;
919         INIT_LIST_HEAD(&cs->job_list);
920         INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
921         kref_init(&cs->refcount);
922         spin_lock_init(&cs->job_lock);
923
924         cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
925         if (!cs_cmpl)
926                 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
927
928         if (!cs_cmpl) {
929                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
930                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
931                 rc = -ENOMEM;
932                 goto free_cs;
933         }
934
935         cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
936                         sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
937         if (!cs->jobs_in_queue_cnt)
938                 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
939                                 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
940
941         if (!cs->jobs_in_queue_cnt) {
942                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
943                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
944                 rc = -ENOMEM;
945                 goto free_cs_cmpl;
946         }
947
948         cs_cmpl->hdev = hdev;
949         cs_cmpl->type = cs->type;
950         spin_lock_init(&cs_cmpl->lock);
951         cs->fence = &cs_cmpl->base_fence;
952
953         spin_lock(&ctx->cs_lock);
954
955         cs_cmpl->cs_seq = ctx->cs_sequence;
956         other = ctx->cs_pending[cs_cmpl->cs_seq &
957                                 (hdev->asic_prop.max_pending_cs - 1)];
958
959         if (other && !completion_done(&other->completion)) {
960                 /* If the following statement is true, it means we have reached
961                  * a point in which only part of the staged submission was
962                  * submitted and we don't have enough room in the 'cs_pending'
963                  * array for the rest of the submission.
964                  * This causes a deadlock because this CS will never be
965                  * completed as it depends on future CS's for completion.
966                  */
967                 if (other->cs_sequence == user_sequence)
968                         dev_crit_ratelimited(hdev->dev,
969                                 "Staged CS %llu deadlock due to lack of resources",
970                                 user_sequence);
971
972                 dev_dbg_ratelimited(hdev->dev,
973                         "Rejecting CS because of too many in-flight CSs\n");
974                 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
975                 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
976                 rc = -EAGAIN;
977                 goto free_fence;
978         }
979
980         /* init hl_fence */
981         hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
982
983         cs->sequence = cs_cmpl->cs_seq;
984
985         ctx->cs_pending[cs_cmpl->cs_seq &
986                         (hdev->asic_prop.max_pending_cs - 1)] =
987                                                         &cs_cmpl->base_fence;
988         ctx->cs_sequence++;
989
990         hl_fence_get(&cs_cmpl->base_fence);
991
992         hl_fence_put(other);
993
994         spin_unlock(&ctx->cs_lock);
995
996         *cs_new = cs;
997
998         return 0;
999
1000 free_fence:
1001         spin_unlock(&ctx->cs_lock);
1002         kfree(cs->jobs_in_queue_cnt);
1003 free_cs_cmpl:
1004         kfree(cs_cmpl);
1005 free_cs:
1006         kfree(cs);
1007         hl_ctx_put(ctx);
1008         return rc;
1009 }
1010
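/*
 * cs_rollback - roll back a CS that was not (or could not be) submitted: drop
 * the extra staged-CS reference, if one was taken, and complete all of its jobs
 * so that their resources are released.
 */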
1011 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
1012 {
1013         struct hl_cs_job *job, *tmp;
1014
1015         staged_cs_put(hdev, cs);
1016
1017         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1018                 hl_complete_job(hdev, job);
1019 }
1020
1021 /*
1022  * release_reserved_encaps_signals() - release reserved encapsulated signals.
1023  * @hdev: pointer to habanalabs device structure
1024  *
1025  * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
1026  * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
1027  * For these signals we also need to put the refcount of the H/W SOB which was taken at the
1028  * reservation.
1029  */
1030 static void release_reserved_encaps_signals(struct hl_device *hdev)
1031 {
1032         struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
1033         struct hl_cs_encaps_sig_handle *handle;
1034         struct hl_encaps_signals_mgr *mgr;
1035         u32 id;
1036
1037         if (!ctx)
1038                 return;
1039
1040         mgr = &ctx->sig_mgr;
1041
1042         idr_for_each_entry(&mgr->handles, handle, id)
1043                 if (handle->cs_seq == ULLONG_MAX)
1044                         kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
1045
1046         hl_ctx_put(ctx);
1047 }
1048
1049 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
1050 {
1051         int i;
1052         struct hl_cs *cs, *tmp;
1053
1054         if (!skip_wq_flush) {
1055                 flush_workqueue(hdev->ts_free_obj_wq);
1056
1057                 /* flush all completions before iterating over the CS mirror list in
1058                  * order to avoid a race with the release functions
1059                  */
1060                 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1061                         flush_workqueue(hdev->cq_wq[i]);
1062
1063                 flush_workqueue(hdev->cs_cmplt_wq);
1064         }
1065
1066         /* Make sure we don't have leftovers in the CS mirror list */
1067         list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
1068                 cs_get(cs);
1069                 cs->aborted = true;
1070                 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
1071                                         cs->ctx->asid, cs->sequence);
1072                 cs_rollback(hdev, cs);
1073                 cs_put(cs);
1074         }
1075
1076         force_complete_multi_cs(hdev);
1077
1078         release_reserved_encaps_signals(hdev);
1079 }
1080
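/*
 * wake_pending_user_interrupt_threads - flush an interrupt's wait list.
 * Entries with a timestamp registration buffer are removed from the list and
 * their buffer/CB references are dropped; all other waiters are completed with
 * -EIO so the waiting threads wake up.
 */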
1081 static void
1082 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
1083 {
1084         struct hl_user_pending_interrupt *pend, *temp;
1085         unsigned long flags;
1086
1087         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
1088         list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
1089                 if (pend->ts_reg_info.buf) {
1090                         list_del(&pend->wait_list_node);
1091                         hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
1092                         hl_cb_put(pend->ts_reg_info.cq_cb);
1093                 } else {
1094                         pend->fence.error = -EIO;
1095                         complete_all(&pend->fence.completion);
1096                 }
1097         }
1098         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
1099 }
1100
1101 void hl_release_pending_user_interrupts(struct hl_device *hdev)
1102 {
1103         struct asic_fixed_properties *prop = &hdev->asic_prop;
1104         struct hl_user_interrupt *interrupt;
1105         int i;
1106
1107         if (!prop->user_interrupt_count)
1108                 return;
1109
1110         /* We iterate through the user interrupt requests and wake up all
1111          * user threads waiting for interrupt completion. We iterate the
1112          * list under a lock; this is why all user threads, once awake,
1113          * will wait on the same lock and will release the waiting object upon
1114          * unlock.
1115          */
1116
1117         for (i = 0 ; i < prop->user_interrupt_count ; i++) {
1118                 interrupt = &hdev->user_interrupt[i];
1119                 wake_pending_user_interrupt_threads(interrupt);
1120         }
1121
1122         interrupt = &hdev->common_user_cq_interrupt;
1123         wake_pending_user_interrupt_threads(interrupt);
1124
1125         interrupt = &hdev->common_decoder_interrupt;
1126         wake_pending_user_interrupt_threads(interrupt);
1127 }
1128
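/*
 * force_complete_cs - complete all CSs on the mirror list with an error
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Marks every in-flight CS fence with -EIO and signals its completion.
 */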
1129 static void force_complete_cs(struct hl_device *hdev)
1130 {
1131         struct hl_cs *cs;
1132
1133         spin_lock(&hdev->cs_mirror_lock);
1134
1135         list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
1136                 cs->fence->error = -EIO;
1137                 complete_all(&cs->fence->completion);
1138         }
1139
1140         spin_unlock(&hdev->cs_mirror_lock);
1141 }
1142
1143 void hl_abort_waitings_for_completion(struct hl_device *hdev)
1144 {
1145         force_complete_cs(hdev);
1146         force_complete_multi_cs(hdev);
1147         hl_release_pending_user_interrupts(hdev);
1148 }
1149
1150 static void job_wq_completion(struct work_struct *work)
1151 {
1152         struct hl_cs_job *job = container_of(work, struct hl_cs_job,
1153                                                 finish_work);
1154         struct hl_cs *cs = job->cs;
1155         struct hl_device *hdev = cs->ctx->hdev;
1156
1157         /* job is no longer needed */
1158         hl_complete_job(hdev, job);
1159 }
1160
1161 static void cs_completion(struct work_struct *work)
1162 {
1163         struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
1164         struct hl_device *hdev = cs->ctx->hdev;
1165         struct hl_cs_job *job, *tmp;
1166
1167         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1168                 hl_complete_job(hdev, job);
1169 }
1170
1171 u32 hl_get_active_cs_num(struct hl_device *hdev)
1172 {
1173         u32 active_cs_num = 0;
1174         struct hl_cs *cs;
1175
1176         spin_lock(&hdev->cs_mirror_lock);
1177
1178         list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
1179                 if (!cs->completed)
1180                         active_cs_num++;
1181
1182         spin_unlock(&hdev->cs_mirror_lock);
1183
1184         return active_cs_num;
1185 }
1186
1187 static int validate_queue_index(struct hl_device *hdev,
1188                                 struct hl_cs_chunk *chunk,
1189                                 enum hl_queue_type *queue_type,
1190                                 bool *is_kernel_allocated_cb)
1191 {
1192         struct asic_fixed_properties *asic = &hdev->asic_prop;
1193         struct hw_queue_properties *hw_queue_prop;
1194
1195         /* This must be checked here to prevent out-of-bounds access to
1196          * hw_queues_props array
1197          */
1198         if (chunk->queue_index >= asic->max_queues) {
1199                 dev_err(hdev->dev, "Queue index %d is invalid\n",
1200                         chunk->queue_index);
1201                 return -EINVAL;
1202         }
1203
1204         hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
1205
1206         if (hw_queue_prop->type == QUEUE_TYPE_NA) {
1207                 dev_err(hdev->dev, "Queue index %d is not applicable\n",
1208                         chunk->queue_index);
1209                 return -EINVAL;
1210         }
1211
1212         if (hw_queue_prop->binned) {
1213                 dev_err(hdev->dev, "Queue index %d is binned out\n",
1214                         chunk->queue_index);
1215                 return -EINVAL;
1216         }
1217
1218         if (hw_queue_prop->driver_only) {
1219                 dev_err(hdev->dev,
1220                         "Queue index %d is restricted for the kernel driver\n",
1221                         chunk->queue_index);
1222                 return -EINVAL;
1223         }
1224
1225         /* When the hw queue type isn't QUEUE_TYPE_HW, the
1226          * USER_ALLOC_CB flag shall be treated as "don't care".
1227          */
1228         if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1229                 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1230                         if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1231                                 dev_err(hdev->dev,
1232                                         "Queue index %d doesn't support user CB\n",
1233                                         chunk->queue_index);
1234                                 return -EINVAL;
1235                         }
1236
1237                         *is_kernel_allocated_cb = false;
1238                 } else {
1239                         if (!(hw_queue_prop->cb_alloc_flags &
1240                                         CB_ALLOC_KERNEL)) {
1241                                 dev_err(hdev->dev,
1242                                         "Queue index %d doesn't support kernel CB\n",
1243                                         chunk->queue_index);
1244                                 return -EINVAL;
1245                         }
1246
1247                         *is_kernel_allocated_cb = true;
1248                 }
1249         } else {
1250                 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1251                                                 & CB_ALLOC_KERNEL);
1252         }
1253
1254         *queue_type = hw_queue_prop->type;
1255         return 0;
1256 }
1257
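/*
 * get_cb_from_cs_chunk - look up the CB referenced by a CS chunk, validate its
 * size against the chunk and take a CS reference (cs_cnt) on it. Returns NULL
 * on any failure; the counter is decremented again once the job is parsed or
 * completed.
 */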
1258 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1259                                         struct hl_mem_mgr *mmg,
1260                                         struct hl_cs_chunk *chunk)
1261 {
1262         struct hl_cb *cb;
1263
1264         cb = hl_cb_get(mmg, chunk->cb_handle);
1265         if (!cb) {
1266                 dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
1267                 return NULL;
1268         }
1269
1270         if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1271                 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1272                 goto release_cb;
1273         }
1274
1275         atomic_inc(&cb->cs_cnt);
1276
1277         return cb;
1278
1279 release_cb:
1280         hl_cb_put(cb);
1281         return NULL;
1282 }
1283
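/*
 * hl_cs_allocate_job - allocate and initialize a job descriptor. The allocation
 * is tried with GFP_ATOMIC first and falls back to GFP_KERNEL, like in
 * allocate_cs(). Only external-queue jobs get a completion work item, and only
 * jobs whose CB will be patched get a userptr list.
 */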
1284 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1285                 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1286 {
1287         struct hl_cs_job *job;
1288
1289         job = kzalloc(sizeof(*job), GFP_ATOMIC);
1290         if (!job)
1291                 job = kzalloc(sizeof(*job), GFP_KERNEL);
1292
1293         if (!job)
1294                 return NULL;
1295
1296         kref_init(&job->refcount);
1297         job->queue_type = queue_type;
1298         job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1299
1300         if (is_cb_patched(hdev, job))
1301                 INIT_LIST_HEAD(&job->userptr_list);
1302
1303         if (job->queue_type == QUEUE_TYPE_EXT)
1304                 INIT_WORK(&job->finish_work, job_wq_completion);
1305
1306         return job;
1307 }
1308
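/*
 * hl_cs_get_cs_type - map the mutually exclusive type flags in cs_flags to a
 * CS type. hl_cs_sanity_checks() rejects submissions that set more than one of
 * these flags before this mapping is relied upon.
 */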
1309 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1310 {
1311         if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1312                 return CS_TYPE_SIGNAL;
1313         else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1314                 return CS_TYPE_WAIT;
1315         else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1316                 return CS_TYPE_COLLECTIVE_WAIT;
1317         else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1318                 return CS_RESERVE_SIGNALS;
1319         else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1320                 return CS_UNRESERVE_SIGNALS;
1321         else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
1322                 return CS_TYPE_ENGINE_CORE;
1323         else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
1324                 return CS_TYPE_FLUSH_PCI_HBW_WRITES;
1325         else
1326                 return CS_TYPE_DEFAULT;
1327 }
1328
1329 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1330 {
1331         struct hl_device *hdev = hpriv->hdev;
1332         struct hl_ctx *ctx = hpriv->ctx;
1333         u32 cs_type_flags, num_chunks;
1334         enum hl_device_status status;
1335         enum hl_cs_type cs_type;
1336         bool is_sync_stream;
1337         int i;
1338
1339         for (i = 0 ; i < sizeof(args->in.pad) ; i++)
1340                 if (args->in.pad[i]) {
1341                         dev_dbg(hdev->dev, "Padding bytes must be 0\n");
1342                         return -EINVAL;
1343                 }
1344
1345         if (!hl_device_operational(hdev, &status)) {
1346                 return -EBUSY;
1347         }
1348
1349         if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1350                         !hdev->supports_staged_submission) {
1351                 dev_err(hdev->dev, "staged submission not supported\n");
1352                 return -EPERM;
1353         }
1354
1355         cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1356
1357         if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1358                 dev_err(hdev->dev,
1359                         "CS type flags are mutually exclusive, context %d\n",
1360                         ctx->asid);
1361                 return -EINVAL;
1362         }
1363
1364         cs_type = hl_cs_get_cs_type(cs_type_flags);
1365         num_chunks = args->in.num_chunks_execute;
1366
1367         is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
1368                         cs_type == CS_TYPE_COLLECTIVE_WAIT);
1369
1370         if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
1371                 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1372                 return -EINVAL;
1373         }
1374
1375         if (cs_type == CS_TYPE_DEFAULT) {
1376                 if (!num_chunks) {
1377                         dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
1378                         return -EINVAL;
1379                 }
1380         } else if (is_sync_stream && num_chunks != 1) {
1381                 dev_err(hdev->dev,
1382                         "Sync stream CS mandates one chunk only, context %d\n",
1383                         ctx->asid);
1384                 return -EINVAL;
1385         }
1386
1387         return 0;
1388 }
1389
1390 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1391                                         struct hl_cs_chunk **cs_chunk_array,
1392                                         void __user *chunks, u32 num_chunks,
1393                                         struct hl_ctx *ctx)
1394 {
1395         u32 size_to_copy;
1396
1397         if (num_chunks > HL_MAX_JOBS_PER_CS) {
1398                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1399                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1400                 dev_err(hdev->dev,
1401                         "Number of chunks can NOT be larger than %d\n",
1402                         HL_MAX_JOBS_PER_CS);
1403                 return -EINVAL;
1404         }
1405
1406         *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1407                                         GFP_ATOMIC);
1408         if (!*cs_chunk_array)
1409                 *cs_chunk_array = kmalloc_array(num_chunks,
1410                                         sizeof(**cs_chunk_array), GFP_KERNEL);
1411         if (!*cs_chunk_array) {
1412                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1413                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1414                 return -ENOMEM;
1415         }
1416
1417         size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1418         if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1419                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1420                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1421                 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1422                 kfree(*cs_chunk_array);
1423                 return -EFAULT;
1424         }
1425
1426         return 0;
1427 }
1428
1429 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1430                                 u64 sequence, u32 flags,
1431                                 u32 encaps_signal_handle)
1432 {
1433         if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1434                 return 0;
1435
1436         cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1437         cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1438
1439         if (cs->staged_first) {
1440                 /* Staged CS sequence is the first CS sequence */
1441                 INIT_LIST_HEAD(&cs->staged_cs_node);
1442                 cs->staged_sequence = cs->sequence;
1443
1444                 if (cs->encaps_signals)
1445                         cs->encaps_sig_hdl_id = encaps_signal_handle;
1446         } else {
1447                 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1448                  * under the cs_mirror_lock
1449                  */
1450                 cs->staged_sequence = sequence;
1451         }
1452
1453         /* Increment CS reference if needed */
1454         staged_cs_get(hdev, cs);
1455
1456         cs->staged_cs = true;
1457
1458         return 0;
1459 }
1460
1461 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1462 {
1463         int i;
1464
1465         for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1466                 if (qid == hdev->stream_master_qid_arr[i])
1467                         return BIT(i);
1468
1469         return 0;
1470 }
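
/*
 * Worked example (array contents are hypothetical): with
 * stream_master_qid_arr = {4, 5, 260, 261}, a chunk targeting qid 260 maps to
 * BIT(2) = 0x4 in stream_master_qid_map, while a qid that is not a stream
 * master contributes 0 to the mask.
 */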
1471
1472 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1473                                 u32 num_chunks, u64 *cs_seq, u32 flags,
1474                                 u32 encaps_signals_handle, u32 timeout,
1475                                 u16 *signal_initial_sob_count)
1476 {
1477         bool staged_mid, int_queues_only = true, using_hw_queues = false;
1478         struct hl_device *hdev = hpriv->hdev;
1479         struct hl_cs_chunk *cs_chunk_array;
1480         struct hl_cs_counters_atomic *cntr;
1481         struct hl_ctx *ctx = hpriv->ctx;
1482         struct hl_cs_job *job;
1483         struct hl_cs *cs;
1484         struct hl_cb *cb;
1485         u64 user_sequence;
1486         u8 stream_master_qid_map = 0;
1487         int rc, i;
1488
1489         cntr = &hdev->aggregated_cs_counters;
1490         user_sequence = *cs_seq;
1491         *cs_seq = ULLONG_MAX;
1492
1493         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1494                         hpriv->ctx);
1495         if (rc)
1496                 goto out;
1497
1498         if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1499                         !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1500                 staged_mid = true;
1501         else
1502                 staged_mid = false;
1503
1504         rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1505                         staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1506                         timeout);
1507         if (rc)
1508                 goto free_cs_chunk_array;
1509
1510         *cs_seq = cs->sequence;
1511
1512         hl_debugfs_add_cs(cs);
1513
1514         rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1515                                                 encaps_signals_handle);
1516         if (rc)
1517                 goto free_cs_object;
1518
1519         /* If this is a staged submission we must return the staged sequence
1520          * rather than the internal CS sequence
1521          */
1522         if (cs->staged_cs)
1523                 *cs_seq = cs->staged_sequence;
1524
1525         /* Validate ALL the CS chunks before submitting the CS */
1526         for (i = 0 ; i < num_chunks ; i++) {
1527                 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1528                 enum hl_queue_type queue_type;
1529                 bool is_kernel_allocated_cb;
1530
1531                 rc = validate_queue_index(hdev, chunk, &queue_type,
1532                                                 &is_kernel_allocated_cb);
1533                 if (rc) {
1534                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1535                         atomic64_inc(&cntr->validation_drop_cnt);
1536                         goto free_cs_object;
1537                 }
1538
1539                 if (is_kernel_allocated_cb) {
1540                         cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
1541                         if (!cb) {
1542                                 atomic64_inc(
1543                                         &ctx->cs_counters.validation_drop_cnt);
1544                                 atomic64_inc(&cntr->validation_drop_cnt);
1545                                 rc = -EINVAL;
1546                                 goto free_cs_object;
1547                         }
1548                 } else {
1549                         cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1550                 }
1551
1552                 if (queue_type == QUEUE_TYPE_EXT ||
1553                                                 queue_type == QUEUE_TYPE_HW) {
1554                         int_queues_only = false;
1555
1556                         /*
1557                          * store which streams are being used for
1558                          * external/HW queues of this CS
1559                          */
1560                         if (hdev->supports_wait_for_multi_cs)
1561                                 stream_master_qid_map |=
1562                                         get_stream_master_qid_mask(hdev,
1563                                                         chunk->queue_index);
1564                 }
1565
1566                 if (queue_type == QUEUE_TYPE_HW)
1567                         using_hw_queues = true;
1568
1569                 job = hl_cs_allocate_job(hdev, queue_type,
1570                                                 is_kernel_allocated_cb);
1571                 if (!job) {
1572                         atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1573                         atomic64_inc(&cntr->out_of_mem_drop_cnt);
1574                         dev_err(hdev->dev, "Failed to allocate a new job\n");
1575                         rc = -ENOMEM;
1576                         if (is_kernel_allocated_cb)
1577                                 goto release_cb;
1578
1579                         goto free_cs_object;
1580                 }
1581
1582                 job->id = i + 1;
1583                 job->cs = cs;
1584                 job->user_cb = cb;
1585                 job->user_cb_size = chunk->cb_size;
1586                 job->hw_queue_id = chunk->queue_index;
1587
1588                 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1589                 cs->jobs_cnt++;
1590
1591                 list_add_tail(&job->cs_node, &cs->job_list);
1592
1593                 /*
1594                  * Increment the CS reference. When the CS reference is 0, the
1595                  * CS is done, can be signaled to the user and all its resources
1596                  * freed. Only increment for a JOB on external or H/W queues,
1597                  * because only for those JOBs do we get a completion.
1598                  */
1599                 if (cs_needs_completion(cs) &&
1600                         (job->queue_type == QUEUE_TYPE_EXT ||
1601                                 job->queue_type == QUEUE_TYPE_HW))
1602                         cs_get(cs);
1603
1604                 hl_debugfs_add_job(hdev, job);
1605
1606                 rc = cs_parser(hpriv, job);
1607                 if (rc) {
1608                         atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1609                         atomic64_inc(&cntr->parsing_drop_cnt);
1610                         dev_err(hdev->dev,
1611                                 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1612                                 cs->ctx->asid, cs->sequence, job->id, rc);
1613                         goto free_cs_object;
1614                 }
1615         }
1616
1617         /* We allow a CS with any queue type combination as long as it does
1618          * not get a completion
1619          */
1620         if (int_queues_only && cs_needs_completion(cs)) {
1621                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1622                 atomic64_inc(&cntr->validation_drop_cnt);
1623                 dev_err(hdev->dev,
1624                         "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1625                         cs->ctx->asid, cs->sequence);
1626                 rc = -EINVAL;
1627                 goto free_cs_object;
1628         }
1629
1630         if (using_hw_queues)
1631                 INIT_WORK(&cs->finish_work, cs_completion);
1632
1633         /*
1634          * store the (external/HW queues) streams used by the CS in the
1635          * fence object for multi-CS completion
1636          */
1637         if (hdev->supports_wait_for_multi_cs)
1638                 cs->fence->stream_master_qid_map = stream_master_qid_map;
1639
1640         rc = hl_hw_queue_schedule_cs(cs);
1641         if (rc) {
1642                 if (rc != -EAGAIN)
1643                         dev_err(hdev->dev,
1644                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1645                                 cs->ctx->asid, cs->sequence, rc);
1646                 goto free_cs_object;
1647         }
1648
1649         *signal_initial_sob_count = cs->initial_sob_count;
1650
1651         rc = HL_CS_STATUS_SUCCESS;
1652         goto put_cs;
1653
1654 release_cb:
1655         atomic_dec(&cb->cs_cnt);
1656         hl_cb_put(cb);
1657 free_cs_object:
1658         cs_rollback(hdev, cs);
1659         *cs_seq = ULLONG_MAX;
1660         /* The path below is both for good and erroneous exits */
1661 put_cs:
1662         /* We finished with the CS in this function, so put the ref */
1663         cs_put(cs);
1664 free_cs_chunk_array:
1665         kfree(cs_chunk_array);
1666 out:
1667         return rc;
1668 }
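
/*
 * Summary of the default-CS path above: copy the chunk array from user space,
 * allocate the CS object (reusing the user-supplied staged sequence for a
 * mid/last staged CS), then for every chunk validate the queue, resolve the
 * CB, allocate a job and run the parser, and finally hand the CS to
 * hl_hw_queue_schedule_cs(). A failure after the CS is allocated rolls it
 * back via cs_rollback(), and on both the good and the erroneous path the
 * function's reference is dropped by cs_put().
 */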
1669
1670 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1671                                 u64 *cs_seq)
1672 {
1673         struct hl_device *hdev = hpriv->hdev;
1674         struct hl_ctx *ctx = hpriv->ctx;
1675         bool need_soft_reset = false;
1676         int rc = 0, do_ctx_switch = 0;
1677         void __user *chunks;
1678         u32 num_chunks, tmp;
1679         u16 sob_count;
1680         int ret;
1681
1682         if (hdev->supports_ctx_switch)
1683                 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1684
1685         if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1686                 mutex_lock(&hpriv->restore_phase_mutex);
1687
1688                 if (do_ctx_switch) {
1689                         rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1690                         if (rc) {
1691                                 dev_err_ratelimited(hdev->dev,
1692                                         "Failed to switch to context %d, rejecting CS! %d\n",
1693                                         ctx->asid, rc);
1694                                 /*
1695                                  * If we timed out, or if the device is not
1696                                  * IDLE while we want to do a context switch
1697                                  * (-EBUSY), we need to soft-reset because
1698                                  * QMAN is probably stuck. However, we can't
1699                                  * call reset here directly because of a
1700                                  * deadlock, so we need to do it at the very
1701                                  * end of this function.
1702                                  */
1703                                 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1704                                         need_soft_reset = true;
1705                                 mutex_unlock(&hpriv->restore_phase_mutex);
1706                                 goto out;
1707                         }
1708                 }
1709
1710                 hdev->asic_funcs->restore_phase_topology(hdev);
1711
1712                 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1713                 num_chunks = args->in.num_chunks_restore;
1714
1715                 if (!num_chunks) {
1716                         dev_dbg(hdev->dev,
1717                                 "Need to run restore phase but restore CS is empty\n");
1718                         rc = 0;
1719                 } else {
1720                         rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1721                                         cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
1722                 }
1723
1724                 mutex_unlock(&hpriv->restore_phase_mutex);
1725
1726                 if (rc) {
1727                         dev_err(hdev->dev,
1728                                 "Failed to submit restore CS for context %d (%d)\n",
1729                                 ctx->asid, rc);
1730                         goto out;
1731                 }
1732
1733                 /* Need to wait for restore completion before execution phase */
1734                 if (num_chunks) {
1735                         enum hl_cs_wait_status status;
1736 wait_again:
1737                         ret = _hl_cs_wait_ioctl(hdev, ctx,
1738                                         jiffies_to_usecs(hdev->timeout_jiffies),
1739                                         *cs_seq, &status, NULL);
1740                         if (ret) {
1741                                 if (ret == -ERESTARTSYS) {
1742                                         usleep_range(100, 200);
1743                                         goto wait_again;
1744                                 }
1745
1746                                 dev_err(hdev->dev,
1747                                         "Restore CS for context %d failed to complete %d\n",
1748                                         ctx->asid, ret);
1749                                 rc = -ENOEXEC;
1750                                 goto out;
1751                         }
1752                 }
1753
1754                 if (hdev->supports_ctx_switch)
1755                         ctx->thread_ctx_switch_wait_token = 1;
1756
1757         } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
1758                 rc = hl_poll_timeout_memory(hdev,
1759                         &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1760                         100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1761
1762                 if (rc == -ETIMEDOUT) {
1763                         dev_err(hdev->dev,
1764                                 "context switch phase timeout (%d)\n", tmp);
1765                         goto out;
1766                 }
1767         }
1768
1769 out:
1770         if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1771                 hl_device_reset(hdev, 0);
1772
1773         return rc;
1774 }
1775
1776 /*
1777  * hl_cs_signal_sob_wraparound_handler: handle the SOB value wraparound case.
1778  * If the SOB value reaches the max value, move to the other SOB reserved
1779  * for the queue.
1780  * @hdev: pointer to device structure
1781  * @q_idx: stream queue index
1782  * @hw_sob: the H/W SOB used in this signal CS.
1783  * @count: signals count
1784  * @encaps_sig: tells whether it's a reservation for encaps signals or not.
1785  *
1786  * Note that this function must be called while hw_queues_lock is taken.
1787  */
1788 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1789                         struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1790
1791 {
1792         struct hl_sync_stream_properties *prop;
1793         struct hl_hw_sob *sob = *hw_sob, *other_sob;
1794         u8 other_sob_offset;
1795
1796         prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1797
1798         hw_sob_get(sob);
1799
1800         /* check for wraparound */
1801         if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1802                 /*
1803                  * Decrement as we reached the max value.
1804                  * The release function won't be called here as we've
1805                  * just incremented the refcount right before calling this
1806                  * function.
1807                  */
1808                 hw_sob_put_err(sob);
1809
1810                 /*
1811                  * Check the other SOB value: if it is still in use then
1812                  * fail, otherwise make the switch.
1813                  */
1814                 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1815                 other_sob = &prop->hw_sob[other_sob_offset];
1816
1817                 if (kref_read(&other_sob->kref) != 1) {
1818                         dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1819                                                                 q_idx);
1820                         return -EINVAL;
1821                 }
1822
1823                 /*
1824                  * next_sob_val always points to the next available signal
1825                  * in the sob, so for encaps signals it will be the next one
1826                  * after reserving the required amount.
1827                  */
1828                 if (encaps_sig)
1829                         prop->next_sob_val = count + 1;
1830                 else
1831                         prop->next_sob_val = count;
1832
1833                 /* only two SOBs are currently in use */
1834                 prop->curr_sob_offset = other_sob_offset;
1835                 *hw_sob = other_sob;
1836
1837                 /*
1838                  * Check if other_sob needs a reset, and if so do it before
1839                  * using it for the reservation or the next signal CS.
1840                  * We do it here, for both the encaps and regular signal CS
1841                  * cases, in order to avoid a possible race of two kref_put
1842                  * calls on the SOB, which could occur at the same time if
1843                  * we moved the SOB reset (kref_put) to cs_do_release().
1844                  * In addition, if we have a combination of signal CS and
1845                  * encaps, and at the point where the SOB needs a reset
1846                  * there are no more reservations and only signal CSs keep
1847                  * coming, then the signal CS must put the refcount and
1848                  * reset the SOB.
1849                  */
1850                 if (other_sob->need_reset)
1851                         hw_sob_put(other_sob);
1852
1853                 if (encaps_sig) {
1854                         /* set reset indication for the sob */
1855                         sob->need_reset = true;
1856                         hw_sob_get(other_sob);
1857                 }
1858
1859                 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1860                                 prop->curr_sob_offset, q_idx);
1861         } else {
1862                 prop->next_sob_val += count;
1863         }
1864
1865         return 0;
1866 }
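
/*
 * Worked example of the wraparound handling above (numbers are illustrative):
 * if next_sob_val is close enough to HL_MAX_SOB_VAL that adding count = 4
 * crosses the limit, the handler drops the extra reference it just took on
 * the current SOB, verifies the alternate SOB is unused (kref == 1) and
 * restarts counting on it:
 *
 *	regular signal CS:   next_sob_val = count     (= 4)
 *	encaps reservation:  next_sob_val = count + 1 (= 5), as next_sob_val
 *	                     must point past the reserved signals
 */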
1867
1868 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1869                 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1870                 bool encaps_signals)
1871 {
1872         u64 *signal_seq_arr = NULL;
1873         u32 size_to_copy, signal_seq_arr_len;
1874         int rc = 0;
1875
1876         if (encaps_signals) {
1877                 *signal_seq = chunk->encaps_signal_seq;
1878                 return 0;
1879         }
1880
1881         signal_seq_arr_len = chunk->num_signal_seq_arr;
1882
1883         /* currently only one signal seq is supported */
1884         if (signal_seq_arr_len != 1) {
1885                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1886                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1887                 dev_err(hdev->dev,
1888                         "Wait for signal CS supports only one signal CS seq\n");
1889                 return -EINVAL;
1890         }
1891
1892         signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1893                                         sizeof(*signal_seq_arr),
1894                                         GFP_ATOMIC);
1895         if (!signal_seq_arr)
1896                 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1897                                         sizeof(*signal_seq_arr),
1898                                         GFP_KERNEL);
1899         if (!signal_seq_arr) {
1900                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1901                 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1902                 return -ENOMEM;
1903         }
1904
1905         size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1906         if (copy_from_user(signal_seq_arr,
1907                                 u64_to_user_ptr(chunk->signal_seq_arr),
1908                                 size_to_copy)) {
1909                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1910                 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1911                 dev_err(hdev->dev,
1912                         "Failed to copy signal seq array from user\n");
1913                 rc = -EFAULT;
1914                 goto out;
1915         }
1916
1917         /* currently it is guaranteed to have only one signal seq */
1918         *signal_seq = signal_seq_arr[0];
1919
1920 out:
1921         kfree(signal_seq_arr);
1922
1923         return rc;
1924 }
1925
1926 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1927                 struct hl_ctx *ctx, struct hl_cs *cs,
1928                 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1929 {
1930         struct hl_cs_counters_atomic *cntr;
1931         struct hl_cs_job *job;
1932         struct hl_cb *cb;
1933         u32 cb_size;
1934
1935         cntr = &hdev->aggregated_cs_counters;
1936
1937         job = hl_cs_allocate_job(hdev, q_type, true);
1938         if (!job) {
1939                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1940                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1941                 dev_err(hdev->dev, "Failed to allocate a new job\n");
1942                 return -ENOMEM;
1943         }
1944
1945         if (cs->type == CS_TYPE_WAIT)
1946                 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1947         else
1948                 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1949
1950         cb = hl_cb_kernel_create(hdev, cb_size,
1951                                 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1952         if (!cb) {
1953                 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1954                 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1955                 kfree(job);
1956                 return -EFAULT;
1957         }
1958
1959         job->id = 0;
1960         job->cs = cs;
1961         job->user_cb = cb;
1962         atomic_inc(&job->user_cb->cs_cnt);
1963         job->user_cb_size = cb_size;
1964         job->hw_queue_id = q_idx;
1965
1966         if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1967                         && cs->encaps_signals)
1968                 job->encaps_sig_wait_offset = encaps_signal_offset;
1969         /*
1970          * No need for parsing, the user CB is the patched CB.
1971          * We call hl_cb_destroy() for two reasons - we don't need the CB in
1972          * the CB idr anymore, and to decrement its refcount as it was
1973          * incremented inside hl_cb_kernel_create().
1974          */
1975         job->patched_cb = job->user_cb;
1976         job->job_cb_size = job->user_cb_size;
1977         hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
1978
1979         /* increment refcount as for external queues we get completion */
1980         cs_get(cs);
1981
1982         cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1983         cs->jobs_cnt++;
1984
1985         list_add_tail(&job->cs_node, &cs->job_list);
1986
1987         hl_debugfs_add_job(hdev, job);
1988
1989         return 0;
1990 }
1991
1992 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1993                                 u32 q_idx, u32 count,
1994                                 u32 *handle_id, u32 *sob_addr,
1995                                 u32 *signals_count)
1996 {
1997         struct hw_queue_properties *hw_queue_prop;
1998         struct hl_sync_stream_properties *prop;
1999         struct hl_device *hdev = hpriv->hdev;
2000         struct hl_cs_encaps_sig_handle *handle;
2001         struct hl_encaps_signals_mgr *mgr;
2002         struct hl_hw_sob *hw_sob;
2003         int hdl_id;
2004         int rc = 0;
2005
2006         if (count >= HL_MAX_SOB_VAL) {
2007                 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
2008                                                 count);
2009                 rc = -EINVAL;
2010                 goto out;
2011         }
2012
2013         if (q_idx >= hdev->asic_prop.max_queues) {
2014                 dev_err(hdev->dev, "Queue index %d is invalid\n",
2015                         q_idx);
2016                 rc = -EINVAL;
2017                 goto out;
2018         }
2019
2020         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2021
2022         if (!hw_queue_prop->supports_sync_stream) {
2023                 dev_err(hdev->dev,
2024                         "Queue index %d does not support sync stream operations\n",
2025                                                                         q_idx);
2026                 rc = -EINVAL;
2027                 goto out;
2028         }
2029
2030         prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2031
2032         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
2033         if (!handle) {
2034                 rc = -ENOMEM;
2035                 goto out;
2036         }
2037
2038         handle->count = count;
2039
2040         hl_ctx_get(hpriv->ctx);
2041         handle->ctx = hpriv->ctx;
2042         mgr = &hpriv->ctx->sig_mgr;
2043
2044         spin_lock(&mgr->lock);
2045         hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
2046         spin_unlock(&mgr->lock);
2047
2048         if (hdl_id < 0) {
2049                 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
2050                 rc = -EINVAL;
2051                 goto put_ctx;
2052         }
2053
2054         handle->id = hdl_id;
2055         handle->q_idx = q_idx;
2056         handle->hdev = hdev;
2057         kref_init(&handle->refcount);
2058
2059         hdev->asic_funcs->hw_queues_lock(hdev);
2060
2061         hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2062
2063         /*
2064          * Increment the SOB value by the user-requested count in order
2065          * to reserve those signals.
2066          * Check whether the amount of signals to reserve exceeds the max
2067          * SOB value, and if so switch to the other SOB.
2068          */
2069         rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
2070                                                                 true);
2071         if (rc) {
2072                 dev_err(hdev->dev, "Failed to switch SOB\n");
2073                 hdev->asic_funcs->hw_queues_unlock(hdev);
2074                 rc = -EINVAL;
2075                 goto remove_idr;
2076         }
2077         /* Set the hw_sob in the handle after calling the SOB wraparound
2078          * handler, since the SOB could have changed.
2079          */
2080         handle->hw_sob = hw_sob;
2081
2082         /* Store the current SOB value for the unreserve validity check and
2083          * for signal offset support.
2084          */
2085         handle->pre_sob_val = prop->next_sob_val - handle->count;
2086
2087         handle->cs_seq = ULLONG_MAX;
2088
2089         *signals_count = prop->next_sob_val;
2090         hdev->asic_funcs->hw_queues_unlock(hdev);
2091
2092         *sob_addr = handle->hw_sob->sob_addr;
2093         *handle_id = hdl_id;
2094
2095         dev_dbg(hdev->dev,
2096                 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
2097                         hw_sob->sob_id, handle->hw_sob->sob_addr,
2098                         prop->next_sob_val - 1, q_idx, hdl_id);
2099         goto out;
2100
2101 remove_idr:
2102         spin_lock(&mgr->lock);
2103         idr_remove(&mgr->handles, hdl_id);
2104         spin_unlock(&mgr->lock);
2105
2106 put_ctx:
2107         hl_ctx_put(handle->ctx);
2108         kfree(handle);
2109
2110 out:
2111         return rc;
2112 }
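
/*
 * Example of the reservation bookkeeping above (values are hypothetical):
 * if next_sob_val was 10 before reserving count = 5 signals and no SOB switch
 * occurred, then afterwards next_sob_val = 15, handle->pre_sob_val = 10 and
 * *signals_count = 15. The unreserve path below only succeeds while
 * pre_sob_val + count still equals next_sob_val and the SOB address is
 * unchanged, i.e. no other signal submission or SOB switch happened in
 * between.
 */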
2113
2114 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
2115 {
2116         struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
2117         struct hl_sync_stream_properties *prop;
2118         struct hl_device *hdev = hpriv->hdev;
2119         struct hl_encaps_signals_mgr *mgr;
2120         struct hl_hw_sob *hw_sob;
2121         u32 q_idx, sob_addr;
2122         int rc = 0;
2123
2124         mgr = &hpriv->ctx->sig_mgr;
2125
2126         spin_lock(&mgr->lock);
2127         encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
2128         if (encaps_sig_hdl) {
2129                 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
2130                                 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
2131                                         encaps_sig_hdl->count);
2132
2133                 hdev->asic_funcs->hw_queues_lock(hdev);
2134
2135                 q_idx = encaps_sig_hdl->q_idx;
2136                 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2137                 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2138                 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
2139
2140                 /* Check if sob_val got out of sync due to other
2141                  * signal submission requests which were handled
2142                  * between the reserve-unreserve calls or SOB switch
2143                  * upon reaching SOB max value.
2144                  */
2145                 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
2146                                 != prop->next_sob_val ||
2147                                 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
2148                         dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
2149                                 encaps_sig_hdl->pre_sob_val,
2150                                 (prop->next_sob_val - encaps_sig_hdl->count));
2151
2152                         hdev->asic_funcs->hw_queues_unlock(hdev);
2153                         rc = -EINVAL;
2154                         goto out;
2155                 }
2156
2157                 /*
2158                  * Decrement the SOB value by count by user request
2159                  * to unreserve those signals
2160                  */
2161                 prop->next_sob_val -= encaps_sig_hdl->count;
2162
2163                 hdev->asic_funcs->hw_queues_unlock(hdev);
2164
2165                 hw_sob_put(hw_sob);
2166
2167                 /* Release the id and free allocated memory of the handle */
2168                 idr_remove(&mgr->handles, handle_id);
2169                 hl_ctx_put(encaps_sig_hdl->ctx);
2170                 kfree(encaps_sig_hdl);
2171         } else {
2172                 rc = -EINVAL;
2173                 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
2174         }
2175 out:
2176         spin_unlock(&mgr->lock);
2177
2178         return rc;
2179 }
2180
2181 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
2182                                 void __user *chunks, u32 num_chunks,
2183                                 u64 *cs_seq, u32 flags, u32 timeout,
2184                                 u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
2185 {
2186         struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
2187         bool handle_found = false, is_wait_cs = false,
2188                         wait_cs_submitted = false,
2189                         cs_encaps_signals = false;
2190         struct hl_cs_chunk *cs_chunk_array, *chunk;
2191         bool staged_cs_with_encaps_signals = false;
2192         struct hw_queue_properties *hw_queue_prop;
2193         struct hl_device *hdev = hpriv->hdev;
2194         struct hl_cs_compl *sig_waitcs_cmpl;
2195         u32 q_idx, collective_engine_id = 0;
2196         struct hl_cs_counters_atomic *cntr;
2197         struct hl_fence *sig_fence = NULL;
2198         struct hl_ctx *ctx = hpriv->ctx;
2199         enum hl_queue_type q_type;
2200         struct hl_cs *cs;
2201         u64 signal_seq;
2202         int rc;
2203
2204         cntr = &hdev->aggregated_cs_counters;
2205         *cs_seq = ULLONG_MAX;
2206
2207         rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
2208                         ctx);
2209         if (rc)
2210                 goto out;
2211
2212         /* currently it is guaranteed to have only one chunk */
2213         chunk = &cs_chunk_array[0];
2214
2215         if (chunk->queue_index >= hdev->asic_prop.max_queues) {
2216                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2217                 atomic64_inc(&cntr->validation_drop_cnt);
2218                 dev_err(hdev->dev, "Queue index %d is invalid\n",
2219                         chunk->queue_index);
2220                 rc = -EINVAL;
2221                 goto free_cs_chunk_array;
2222         }
2223
2224         q_idx = chunk->queue_index;
2225         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2226         q_type = hw_queue_prop->type;
2227
2228         if (!hw_queue_prop->supports_sync_stream) {
2229                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2230                 atomic64_inc(&cntr->validation_drop_cnt);
2231                 dev_err(hdev->dev,
2232                         "Queue index %d does not support sync stream operations\n",
2233                         q_idx);
2234                 rc = -EINVAL;
2235                 goto free_cs_chunk_array;
2236         }
2237
2238         if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
2239                 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
2240                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2241                         atomic64_inc(&cntr->validation_drop_cnt);
2242                         dev_err(hdev->dev,
2243                                 "Queue index %d is invalid\n", q_idx);
2244                         rc = -EINVAL;
2245                         goto free_cs_chunk_array;
2246                 }
2247
2248                 if (!hdev->nic_ports_mask) {
2249                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2250                         atomic64_inc(&cntr->validation_drop_cnt);
2251                         dev_err(hdev->dev,
2252                                 "Collective operations not supported when NIC ports are disabled");
2253                         rc = -EINVAL;
2254                         goto free_cs_chunk_array;
2255                 }
2256
2257                 collective_engine_id = chunk->collective_engine_id;
2258         }
2259
2260         is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2261                         cs_type == CS_TYPE_COLLECTIVE_WAIT);
2262
2263         cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2264
2265         if (is_wait_cs) {
2266                 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2267                                 ctx, cs_encaps_signals);
2268                 if (rc)
2269                         goto free_cs_chunk_array;
2270
2271                 if (cs_encaps_signals) {
2272                         /* check if the CS sequence has an encapsulated
2273                          * signals handle
2274                          */
2275                         struct idr *idp;
2276                         u32 id;
2277
2278                         spin_lock(&ctx->sig_mgr.lock);
2279                         idp = &ctx->sig_mgr.handles;
2280                         idr_for_each_entry(idp, encaps_sig_hdl, id) {
2281                                 if (encaps_sig_hdl->cs_seq == signal_seq) {
2282                                         /* Get a refcount to protect removing this handle from
2283                                          * the idr, needed when multiple wait CSs are used with
2284                                          * an offset to wait on reserved encaps signals.
2285                                          * Since kref_put of this handle is executed outside the
2286                                          * current lock, it is possible that the handle refcount
2287                                          * is 0 but it has yet to be removed from the list. In
2288                                          * this case we need to consider the handle as not valid.
2289                                          */
2290                                         if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
2291                                                 handle_found = true;
2292                                         break;
2293                                 }
2294                         }
2295                         spin_unlock(&ctx->sig_mgr.lock);
2296
2297                         if (!handle_found) {
2298                                 /* treat as signal CS already finished */
2299                                 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2300                                                 signal_seq);
2301                                 rc = 0;
2302                                 goto free_cs_chunk_array;
2303                         }
2304
2305                         /* validate also the signal offset value */
2306                         if (chunk->encaps_signal_offset >
2307                                         encaps_sig_hdl->count) {
2308                                 dev_err(hdev->dev, "offset(%u) value exceeds max reserved signals count(%u)!\n",
2309                                                 chunk->encaps_signal_offset,
2310                                                 encaps_sig_hdl->count);
2311                                 rc = -EINVAL;
2312                                 goto free_cs_chunk_array;
2313                         }
2314                 }
2315
2316                 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2317                 if (IS_ERR(sig_fence)) {
2318                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2319                         atomic64_inc(&cntr->validation_drop_cnt);
2320                         dev_err(hdev->dev,
2321                                 "Failed to get signal CS with seq 0x%llx\n",
2322                                 signal_seq);
2323                         rc = PTR_ERR(sig_fence);
2324                         goto free_cs_chunk_array;
2325                 }
2326
2327                 if (!sig_fence) {
2328                         /* signal CS already finished */
2329                         rc = 0;
2330                         goto free_cs_chunk_array;
2331                 }
2332
2333                 sig_waitcs_cmpl =
2334                         container_of(sig_fence, struct hl_cs_compl, base_fence);
2335
2336                 staged_cs_with_encaps_signals = !!
2337                                 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2338                                 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2339
2340                 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2341                                 !staged_cs_with_encaps_signals) {
2342                         atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2343                         atomic64_inc(&cntr->validation_drop_cnt);
2344                         dev_err(hdev->dev,
2345                                 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2346                                 signal_seq);
2347                         hl_fence_put(sig_fence);
2348                         rc = -EINVAL;
2349                         goto free_cs_chunk_array;
2350                 }
2351
2352                 if (completion_done(&sig_fence->completion)) {
2353                         /* signal CS already finished */
2354                         hl_fence_put(sig_fence);
2355                         rc = 0;
2356                         goto free_cs_chunk_array;
2357                 }
2358         }
2359
2360         rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2361         if (rc) {
2362                 if (is_wait_cs)
2363                         hl_fence_put(sig_fence);
2364
2365                 goto free_cs_chunk_array;
2366         }
2367
2368         /*
2369          * Save the signal CS fence for later initialization right before
2370          * hanging the wait CS on the queue.
2371          * For the encaps signals case, we save the CS sequence and handle
2372          * pointer for later initialization.
2373          */
2374         if (is_wait_cs) {
2375                 cs->signal_fence = sig_fence;
2376                 /* Store the handle pointer so we don't have to
2377                  * look for it again later in the flow,
2378                  * when we need to set the SOB info in hw_queue.
2379                  */
2380                 if (cs->encaps_signals)
2381                         cs->encaps_sig_hdl = encaps_sig_hdl;
2382         }
2383
2384         hl_debugfs_add_cs(cs);
2385
2386         *cs_seq = cs->sequence;
2387
2388         if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2389                 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2390                                 q_idx, chunk->encaps_signal_offset);
2391         else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2392                 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2393                                 cs, q_idx, collective_engine_id,
2394                                 chunk->encaps_signal_offset);
2395         else {
2396                 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2397                 atomic64_inc(&cntr->validation_drop_cnt);
2398                 rc = -EINVAL;
2399         }
2400
2401         if (rc)
2402                 goto free_cs_object;
2403
2404         if (q_type == QUEUE_TYPE_HW)
2405                 INIT_WORK(&cs->finish_work, cs_completion);
2406
2407         rc = hl_hw_queue_schedule_cs(cs);
2408         if (rc) {
2409                 /* If a wait CS failed here, it means the signal CS has
2410                  * already completed. We want to free all its related
2411                  * objects, but we don't want to fail the ioctl.
2412                  */
2413                 if (is_wait_cs)
2414                         rc = 0;
2415                 else if (rc != -EAGAIN)
2416                         dev_err(hdev->dev,
2417                                 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2418                                 ctx->asid, cs->sequence, rc);
2419                 goto free_cs_object;
2420         }
2421
2422         *signal_sob_addr_offset = cs->sob_addr_offset;
2423         *signal_initial_sob_count = cs->initial_sob_count;
2424
2425         rc = HL_CS_STATUS_SUCCESS;
2426         if (is_wait_cs)
2427                 wait_cs_submitted = true;
2428         goto put_cs;
2429
2430 free_cs_object:
2431         cs_rollback(hdev, cs);
2432         *cs_seq = ULLONG_MAX;
2433         /* The path below is both for good and erroneous exits */
2434 put_cs:
2435         /* We finished with the CS in this function, so put the ref */
2436         cs_put(cs);
2437 free_cs_chunk_array:
2438         if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
2439                 kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
2440         kfree(cs_chunk_array);
2441 out:
2442         return rc;
2443 }
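
/*
 * Summary of the signal/wait path above: a wait CS first resolves the signal
 * sequence (directly from the chunk, or, for encapsulated signals, from the
 * reservation handle in the context's signal manager), takes a reference on
 * the signal fence, and only then allocates the CS and its kernel-owned
 * job(s). A wait CS that fails to schedule because its signal CS already
 * completed is deliberately not reported as an error to user space.
 */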
2444
2445 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
2446                                                 u32 num_engine_cores, u32 core_command)
2447 {
2448         int rc;
2449         struct hl_device *hdev = hpriv->hdev;
2450         void __user *engine_cores_arr;
2451         u32 *cores;
2452
2453         if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
2454                 dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
2455                 return -EINVAL;
2456         }
2457
2458         if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
2459                 dev_err(hdev->dev, "Engine core command is invalid\n");
2460                 return -EINVAL;
2461         }
2462
2463         engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
2464         cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
2465         if (!cores)
2466                 return -ENOMEM;
2467
2468         if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
2469                 dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
2470                 kfree(cores);
2471                 return -EFAULT;
2472         }
2473
2474         rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
2475         kfree(cores);
2476
2477         return rc;
2478 }
2479
2480 static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
2481 {
2482         struct hl_device *hdev = hpriv->hdev;
2483         struct asic_fixed_properties *prop = &hdev->asic_prop;
2484
2485         if (!prop->hbw_flush_reg) {
2486                 dev_dbg(hdev->dev, "HBW flush is not supported\n");
2487                 return -EOPNOTSUPP;
2488         }
2489
2490         RREG32(prop->hbw_flush_reg);
2491
2492         return 0;
2493 }
2494
2495 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2496 {
2497         union hl_cs_args *args = data;
2498         enum hl_cs_type cs_type = 0;
2499         u64 cs_seq = ULLONG_MAX;
2500         void __user *chunks;
2501         u32 num_chunks, flags, timeout,
2502                 signals_count = 0, sob_addr = 0, handle_id = 0;
2503         u16 sob_initial_count = 0;
2504         int rc;
2505
2506         rc = hl_cs_sanity_checks(hpriv, args);
2507         if (rc)
2508                 goto out;
2509
2510         rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2511         if (rc)
2512                 goto out;
2513
2514         cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2515                                         ~HL_CS_FLAGS_FORCE_RESTORE);
2516         chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2517         num_chunks = args->in.num_chunks_execute;
2518         flags = args->in.cs_flags;
2519
2520         /* In case this is a staged CS, user should supply the CS sequence */
2521         if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2522                         !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2523                 cs_seq = args->in.seq;
2524
2525         timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2526                         ? msecs_to_jiffies(args->in.timeout * 1000)
2527                         : hpriv->hdev->timeout_jiffies;
2528
2529         switch (cs_type) {
2530         case CS_TYPE_SIGNAL:
2531         case CS_TYPE_WAIT:
2532         case CS_TYPE_COLLECTIVE_WAIT:
2533                 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2534                                         &cs_seq, args->in.cs_flags, timeout,
2535                                         &sob_addr, &sob_initial_count);
2536                 break;
2537         case CS_RESERVE_SIGNALS:
2538                 rc = cs_ioctl_reserve_signals(hpriv,
2539                                         args->in.encaps_signals_q_idx,
2540                                         args->in.encaps_signals_count,
2541                                         &handle_id, &sob_addr, &signals_count);
2542                 break;
2543         case CS_UNRESERVE_SIGNALS:
2544                 rc = cs_ioctl_unreserve_signals(hpriv,
2545                                         args->in.encaps_sig_handle_id);
2546                 break;
2547         case CS_TYPE_ENGINE_CORE:
2548                 rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
2549                                 args->in.num_engine_cores, args->in.core_command);
2550                 break;
2551         case CS_TYPE_FLUSH_PCI_HBW_WRITES:
2552                 rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
2553                 break;
2554         default:
2555                 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2556                                                 args->in.cs_flags,
2557                                                 args->in.encaps_sig_handle_id,
2558                                                 timeout, &sob_initial_count);
2559                 break;
2560         }
2561 out:
2562         if (rc != -EAGAIN) {
2563                 memset(args, 0, sizeof(*args));
2564
2565                 switch (cs_type) {
2566                 case CS_RESERVE_SIGNALS:
2567                         args->out.handle_id = handle_id;
2568                         args->out.sob_base_addr_offset = sob_addr;
2569                         args->out.count = signals_count;
2570                         break;
2571                 case CS_TYPE_SIGNAL:
2572                         args->out.sob_base_addr_offset = sob_addr;
2573                         args->out.sob_count_before_submission = sob_initial_count;
2574                         args->out.seq = cs_seq;
2575                         break;
2576                 case CS_TYPE_DEFAULT:
2577                         args->out.sob_count_before_submission = sob_initial_count;
2578                         args->out.seq = cs_seq;
2579                         break;
2580                 default:
2581                         args->out.seq = cs_seq;
2582                         break;
2583                 }
2584
2585                 args->out.status = rc;
2586         }
2587
2588         return rc;
2589 }
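
/*
 * Hedged user-space sketch of submitting a default CS through this ioctl.
 * The fd, CB handle, queue index, CB size and the use_sequence() helper are
 * hypothetical, and the ioctl request number is assumed to be exposed as
 * HL_IOCTL_CS by the uapi header; only the args fields shown here are
 * consumed by this file:
 *
 *	union hl_cs_args args = {};
 *	struct hl_cs_chunk chunk = {};
 *
 *	chunk.cb_handle = cb_handle;
 *	chunk.queue_index = queue_index;
 *	chunk.cb_size = cb_size;
 *
 *	args.in.chunks_execute = (__u64) (uintptr_t) &chunk;
 *	args.in.num_chunks_execute = 1;
 *	args.in.cs_flags = 0;
 *
 *	if (!ioctl(fd, HL_IOCTL_CS, &args) &&
 *			args.out.status == HL_CS_STATUS_SUCCESS)
 *		use_sequence(args.out.seq);	// e.g. pass to the wait-CS ioctl
 */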
2590
2591 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2592                                 enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
2593 {
2594         struct hl_device *hdev = ctx->hdev;
2595         ktime_t timestamp_kt;
2596         long completion_rc;
2597         int rc = 0, error;
2598
2599         if (IS_ERR(fence)) {
2600                 rc = PTR_ERR(fence);
2601                 if (rc == -EINVAL)
2602                         dev_notice_ratelimited(hdev->dev,
2603                                 "Can't wait on CS %llu because current CS is at seq %llu\n",
2604                                 seq, ctx->cs_sequence);
2605                 return rc;
2606         }
2607
2608         if (!fence) {
2609                 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
2610                         dev_dbg(hdev->dev,
2611                                 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2612                                 seq, ctx->cs_sequence);
2613                         *status = CS_WAIT_STATUS_GONE;
2614                         return 0;
2615                 }
2616
2617                 completion_rc = 1;
2618                 goto report_results;
2619         }
2620
2621         if (!timeout_us) {
2622                 completion_rc = completion_done(&fence->completion);
2623         } else {
2624                 unsigned long timeout;
2625
2626                 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2627                                 timeout_us : usecs_to_jiffies(timeout_us);
2628                 completion_rc =
2629                         wait_for_completion_interruptible_timeout(
2630                                 &fence->completion, timeout);
2631         }
2632
2633         error = fence->error;
2634         timestamp_kt = fence->timestamp;
2635
2636 report_results:
2637         if (completion_rc > 0) {
2638                 *status = CS_WAIT_STATUS_COMPLETED;
2639                 if (timestamp)
2640                         *timestamp = ktime_to_ns(timestamp_kt);
2641         } else {
2642                 *status = CS_WAIT_STATUS_BUSY;
2643         }
2644
2645         if (completion_rc == -ERESTARTSYS)
2646                 rc = completion_rc;
2647         else if (error == -ETIMEDOUT || error == -EIO)
2648                 rc = error;
2649
2650         return rc;
2651 }
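
/*
 * Outcome mapping of the helper above: a NULL fence whose outcome is no
 * longer stored returns CS_WAIT_STATUS_GONE early; otherwise a positive
 * completion_rc yields CS_WAIT_STATUS_COMPLETED (plus an optional timestamp)
 * and anything else yields CS_WAIT_STATUS_BUSY. Independently, -ERESTARTSYS
 * from the interruptible wait is propagated as the return value, and a fence
 * error of -ETIMEDOUT or -EIO is returned to the caller as well.
 */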
2652
2653 /*
2654  * hl_cs_poll_fences - iterate CS fences to check for CS completion
2655  *
2656  * @mcs_data: multi-CS internal data
2657  * @mcs_compl: multi-CS completion structure
2658  *
2659  * @return 0 on success, otherwise a non-zero error code
2660  *
2661  * The function iterates over all CS sequences in the list and sets a bit in
2662  * completion_bitmap for each completed CS.
2663  * While iterating, the function adds the stream map of each fence in the
2664  * fence array to the completion QID stream map, to be used by CSs to
2665  * complete the multi-CS context.
2666  * This function shall be called after taking a context ref.
2667  */
2668 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
2669 {
2670         struct hl_fence **fence_ptr = mcs_data->fence_arr;
2671         struct hl_device *hdev = mcs_data->ctx->hdev;
2672         int i, rc, arr_len = mcs_data->arr_len;
2673         u64 *seq_arr = mcs_data->seq_arr;
2674         ktime_t max_ktime, first_cs_time;
2675         enum hl_cs_wait_status status;
2676
2677         memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
2678
2679         /* get all fences under the same lock */
2680         rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2681         if (rc)
2682                 return rc;
2683
2684         /*
2685          * Re-initialize the completion here to handle 2 possible cases:
2686          * 1. A CS completes the multi-CS prior to clearing the completion, in
2687          *    which case the fence iteration is guaranteed to catch the CS completion.
2688          * 2. The completion occurs after the re-init of the completion, in
2689          *    which case we will wake up immediately in wait_for_completion.
2690          */
2691         reinit_completion(&mcs_compl->completion);
2692
2693         /*
2694          * set to maximum time to verify timestamp is valid: if at the end
2695          * this value is maintained- no timestamp was updated
2696          */
2697         max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2698         first_cs_time = max_ktime;
2699
2700         for (i = 0; i < arr_len; i++, fence_ptr++) {
2701                 struct hl_fence *fence = *fence_ptr;
2702
2703                 /*
2704                  * In order to prevent a case where we wait until timeout even though a CS
2705                  * associated with the multi-CS actually completed, we do things in the below order:
2706                  * 1. For each fence, set its QID map in the multi-CS completion QID map. This way
2707                  *    any CS can, potentially, complete the multi-CS for the specific QID (note
2708                  *    that once the completion is initialized, calling complete* and then waiting on
2709                  *    the completion will cause it to return at once)
2710                  * 2. Only after allowing multi-CS completion for the specific QID do we check
2711                  *    whether the specific CS already completed (and thus the wait-for-completion
2712                  *    part will be skipped). If the CS has not completed, it is guaranteed that the
2713                  *    completing CS will wake up the completion.
2714                  */
2715                 if (fence)
2716                         mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
2717
2718                 /*
2719                  * function won't sleep as it is called with timeout 0 (i.e.
2720                  * poll the fence)
2721                  */
2722                 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
2723                 if (rc) {
2724                         dev_err(hdev->dev,
2725                                 "wait_for_fence error :%d for CS seq %llu\n",
2726                                                                 rc, seq_arr[i]);
2727                         break;
2728                 }
2729
2730                 switch (status) {
2731                 case CS_WAIT_STATUS_BUSY:
2732                         /* CS did not finish; the QID to wait on is already stored */
2733                         break;
2734                 case CS_WAIT_STATUS_COMPLETED:
2735                         /*
2736                          * Use mcs_handling_done to avoid the possibility of mcs_data
2737                          * returning to the user, indicating the CS completed, before it
2738                          * finished all of its mcs handling, in order to avoid a race the
2739                          * next time the user waits for mcs.
2740                          * Note: when reaching this case the fence is definitely not NULL,
2741                          *       but the NULL check was added to satisfy static analysis.
2742                          */
2743                         if (fence && !fence->mcs_handling_done) {
2744                                 /*
2745                                  * In case the multi-CS completed but its MCS handling is not
2746                                  * done, "complete" the multi-CS to prevent it from waiting
2747                                  * until timeout; the "MCS handling done" check gets another
2748                                  * chance at the next iteration.
2749                                  */
2750                                 complete_all(&mcs_compl->completion);
2751                                 break;
2752                         }
2753
2754                         mcs_data->completion_bitmap |= BIT(i);
2755                         /*
2756                          * Of all completed CSs we take the earliest timestamp, so
2757                          * check whether this timestamp is earlier than the earliest
2758                          * one seen so far.
2759                          */
2760                         if (fence && mcs_data->update_ts &&
2761                                         (ktime_compare(fence->timestamp, first_cs_time) < 0))
2762                                 first_cs_time = fence->timestamp;
2763                         break;
2764                 case CS_WAIT_STATUS_GONE:
2765                         mcs_data->update_ts = false;
2766                         mcs_data->gone_cs = true;
2767                         /*
2768                          * It is possible to get old sequence numbers from the user that
2769                          * relate to already-completed CSs whose fences are already gone.
2770                          * In this case, mark the CS as completed, but there is no need
2771                          * to consider its QID for mcs completion.
2772                          */
2773                         mcs_data->completion_bitmap |= BIT(i);
2774                         break;
2775                 default:
2776                         dev_err(hdev->dev, "Invalid fence status\n");
2777                         rc = -EINVAL;
2778                         break;
2779                 }
2780
2781         }
2782
2783         hl_fences_put(mcs_data->fence_arr, arr_len);
2784
2785         if (mcs_data->update_ts &&
2786                         (ktime_compare(first_cs_time, max_ktime) != 0))
2787                 mcs_data->timestamp = ktime_to_ns(first_cs_time);
2788
2789         return rc;
2790 }
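
/*
 * Illustrative sketch (not part of the driver): the "earliest timestamp"
 * logic above starts from the maximal ktime value so that the first valid
 * fence timestamp always wins the comparison, and an unchanged value at the
 * end means no timestamp was collected. The helper name below is
 * hypothetical.
 */
static ktime_t __maybe_unused hl_earliest_ts_sketch(const ktime_t *ts_arr, int len)
{
        ktime_t earliest = ktime_set(KTIME_SEC_MAX, 0);
        int i;

        for (i = 0; i < len; i++)
                if (ktime_compare(ts_arr[i], earliest) < 0)
                        earliest = ts_arr[i];

        return earliest;
}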
2791
2792 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
2793                                 enum hl_cs_wait_status *status, s64 *timestamp)
2794 {
2795         struct hl_fence *fence;
2796         int rc = 0;
2797
2798         if (timestamp)
2799                 *timestamp = 0;
2800
2801         hl_ctx_get(ctx);
2802
2803         fence = hl_ctx_get_fence(ctx, seq);
2804
2805         rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2806         hl_fence_put(fence);
2807         hl_ctx_put(ctx);
2808
2809         return rc;
2810 }
2811
2812 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
2813 {
2814         if (usecs <= U32_MAX)
2815                 return usecs_to_jiffies(usecs);
2816
2817         /*
2818          * If the value in nanoseconds would overflow 64 bits, use the largest
2819          * 64-bit value.
2820          */
2821         if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
2822                 return nsecs_to_jiffies(U64_MAX);
2823
2824         return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
2825 }
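
/*
 * Illustrative sketch (not part of the driver): the three ranges handled by
 * hl_usecs64_to_jiffies(). Values that fit in 32 bits go through
 * usecs_to_jiffies() directly, larger values are converted via nanoseconds,
 * and values whose nanosecond product would overflow 64 bits are clamped to
 * the maximum. The helper name below is hypothetical.
 */
static void __maybe_unused hl_usecs64_to_jiffies_sketch(void)
{
        unsigned long small = hl_usecs64_to_jiffies(1000);              /* fits in 32 bits */
        unsigned long large = hl_usecs64_to_jiffies((u64)U32_MAX + 1);  /* converted via nsecs */
        unsigned long huge = hl_usecs64_to_jiffies(U64_MAX);            /* clamped */

        (void)small;
        (void)large;
        (void)huge;
}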
2826
2827 /*
2828  * hl_wait_multi_cs_completion_init - init completion structure
2829  *
2830  * @hdev: pointer to habanalabs device structure
2831  *
2832  * @return valid completion struct pointer on success, otherwise error pointer
2833  *
2834  * Up to MULTI_CS_MAX_USER_CTX calls can be made to the driver concurrently.
2835  * The function takes the first available completion (by marking it "used")
2836  * and initializes its values. The stream master QID map of the returned
2837  * completion is cleared here and filled in incrementally later, when the
2838  * multi-CS fences are polled.
2839  */
2840 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
2841 {
2842         struct multi_cs_completion *mcs_compl;
2843         int i;
2844
2845         /* find free multi_cs completion structure */
2846         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2847                 mcs_compl = &hdev->multi_cs_completion[i];
2848                 spin_lock(&mcs_compl->lock);
2849                 if (!mcs_compl->used) {
2850                         mcs_compl->used = 1;
2851                         mcs_compl->timestamp = 0;
2852                         /*
2853                          * Init the QID map to 0 so that no CS can complete it yet; the
2854                          * actual QID map of the multi-CS CSs is set incrementally later
2855                          */
2856                         mcs_compl->stream_master_qid_map = 0;
2857                         spin_unlock(&mcs_compl->lock);
2858                         break;
2859                 }
2860                 spin_unlock(&mcs_compl->lock);
2861         }
2862
2863         if (i == MULTI_CS_MAX_USER_CTX) {
2864                 dev_err(hdev->dev, "no available multi-CS completion structure\n");
2865                 return ERR_PTR(-ENOMEM);
2866         }
2867         return mcs_compl;
2868 }
2869
2870 /*
2871  * hl_wait_multi_cs_completion_fini - return completion structure and set as
2872  *                                    unused
2873  *
2874  * @mcs_compl: pointer to the completion structure
2875  */
2876 static void hl_wait_multi_cs_completion_fini(
2877                                         struct multi_cs_completion *mcs_compl)
2878 {
2879         /*
2880          * Free the completion structure; do it under the lock to stay in sync
2881          * with the thread that signals the completion
2882          */
2883         spin_lock(&mcs_compl->lock);
2884         mcs_compl->used = 0;
2885         spin_unlock(&mcs_compl->lock);
2886 }
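
/*
 * Illustrative sketch (not part of the driver): the two helpers above are
 * meant to be used as a claim/release pair around the actual wait, so that a
 * completion slot is never recycled while the signaling path may still touch
 * it. The function below is hypothetical.
 */
static int __maybe_unused hl_mcs_slot_pairing_sketch(struct hl_device *hdev)
{
        struct multi_cs_completion *mcs_compl;

        mcs_compl = hl_wait_multi_cs_completion_init(hdev);
        if (IS_ERR(mcs_compl))
                return PTR_ERR(mcs_compl);

        /* poll fences and/or wait on mcs_compl->completion here */

        hl_wait_multi_cs_completion_fini(mcs_compl);
        return 0;
}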
2887
2888 /*
2889  * hl_wait_multi_cs_completion - wait for first CS to complete
2890  *
2891  * @mcs_data: multi-CS internal data
2892  * @mcs_compl: multi-CS completion structure to wait on
2893  * @return 0 on success, otherwise non 0 error code
2894  */
2895 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
2896                                                 struct multi_cs_completion *mcs_compl)
2897 {
2898         long completion_rc;
2899
2900         completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
2901                                                                         mcs_data->timeout_jiffies);
2902
2903         /* update timestamp */
2904         if (completion_rc > 0)
2905                 mcs_data->timestamp = mcs_compl->timestamp;
2906
2907         if (completion_rc == -ERESTARTSYS)
2908                 return completion_rc;
2909
2910         mcs_data->wait_status = completion_rc;
2911
2912         return 0;
2913 }
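
/*
 * Illustrative sketch (not part of the driver):
 * wait_for_completion_interruptible_timeout() returns the remaining time in
 * jiffies (> 0) on completion, 0 on timeout and -ERESTARTSYS when a signal is
 * received. hl_wait_multi_cs_completion() folds the first two cases into
 * mcs_data->wait_status and only propagates the signal case as an error, so a
 * caller can tell the outcomes apart as below. The helper name is
 * hypothetical.
 */
static bool __maybe_unused hl_mcs_wait_timed_out_sketch(struct multi_cs_data *mcs_data,
                                                struct multi_cs_completion *mcs_compl)
{
        int rc = hl_wait_multi_cs_completion(mcs_data, mcs_compl);

        if (rc)
                return false;   /* interrupted by a signal */

        /* wait_status == 0 means the timeout expired without a completion */
        return mcs_data->wait_status == 0;
}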
2914
2915 /*
2916  * hl_multi_cs_completion_init - init array of multi-CS completion structures
2917  *
2918  * @hdev: pointer to habanalabs device structure
2919  */
2920 void hl_multi_cs_completion_init(struct hl_device *hdev)
2921 {
2922         struct multi_cs_completion *mcs_cmpl;
2923         int i;
2924
2925         for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2926                 mcs_cmpl = &hdev->multi_cs_completion[i];
2927                 mcs_cmpl->used = 0;
2928                 spin_lock_init(&mcs_cmpl->lock);
2929                 init_completion(&mcs_cmpl->completion);
2930         }
2931 }
2932
2933 /*
2934  * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2935  *
2936  * @hpriv: pointer to the private data of the fd
2937  * @data: pointer to multi-CS wait ioctl in/out args
2938  *
2939  */
2940 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2941 {
2942         struct multi_cs_completion *mcs_compl;
2943         struct hl_device *hdev = hpriv->hdev;
2944         struct multi_cs_data mcs_data = {};
2945         union hl_wait_cs_args *args = data;
2946         struct hl_ctx *ctx = hpriv->ctx;
2947         struct hl_fence **fence_arr;
2948         void __user *seq_arr;
2949         u32 size_to_copy;
2950         u64 *cs_seq_arr;
2951         u8 seq_arr_len;
2952         int rc, i;
2953
2954         for (i = 0 ; i < sizeof(args->in.pad) ; i++)
2955                 if (args->in.pad[i]) {
2956                         dev_dbg(hdev->dev, "Padding bytes must be 0\n");
2957                         return -EINVAL;
2958                 }
2959
2960         if (!hdev->supports_wait_for_multi_cs) {
2961                 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2962                 return -EPERM;
2963         }
2964
2965         seq_arr_len = args->in.seq_arr_len;
2966
2967         if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2968                 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2969                                 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2970                 return -EINVAL;
2971         }
2972
2973         /* allocate memory for sequence array */
2974         cs_seq_arr =
2975                 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2976         if (!cs_seq_arr)
2977                 return -ENOMEM;
2978
2979         /* copy CS sequence array from user */
2980         seq_arr = (void __user *) (uintptr_t) args->in.seq;
2981         size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2982         if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2983                 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2984                 rc = -EFAULT;
2985                 goto free_seq_arr;
2986         }
2987
2988         /* allocate array for the fences */
2989         fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
2990         if (!fence_arr) {
2991                 rc = -ENOMEM;
2992                 goto free_seq_arr;
2993         }
2994
2995         /* initialize the multi-CS internal data */
2996         mcs_data.ctx = ctx;
2997         mcs_data.seq_arr = cs_seq_arr;
2998         mcs_data.fence_arr = fence_arr;
2999         mcs_data.arr_len = seq_arr_len;
3000
3001         hl_ctx_get(ctx);
3002
3003         /* wait (with timeout) for the first CS to be completed */
3004         mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
3005         mcs_compl = hl_wait_multi_cs_completion_init(hdev);
3006         if (IS_ERR(mcs_compl)) {
3007                 rc = PTR_ERR(mcs_compl);
3008                 goto put_ctx;
3009         }
3010
3011         /* poll all CS fences, extract timestamp */
3012         mcs_data.update_ts = true;
3013         rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3014         /*
3015          * skip wait for CS completion when one of the below is true:
3016          * - an error on the poll function
3017          * - one or more CS in the list completed
3018          * - the user called ioctl with timeout 0
3019          */
3020         if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
3021                 goto completion_fini;
3022
3023         while (true) {
3024                 rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
3025                 if (rc || (mcs_data.wait_status == 0))
3026                         break;
3027
3028                 /*
3029                  * poll fences once again to update the CS map.
3030                  * no timestamp should be updated this time.
3031                  */
3032                 mcs_data.update_ts = false;
3033                 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3034
3035                 if (rc || mcs_data.completion_bitmap)
3036                         break;
3037
3038                 /*
3039                  * If hl_wait_multi_cs_completion returned before the timeout (i.e. it
3040                  * got a completion), it was completed either by a CS in the multi-CS
3041                  * list (in which case the indication is a non-empty completion_bitmap)
3042                  * or by a CS submitted to one of the shared stream masters but not in
3043                  * the multi-CS list. In the latter case we should wait again, but with
3044                  * the remaining timeout and with the timestamp reset to zero, to let a
3045                  * CS related to the current multi-CS set a new, relevant, timestamp.
3046                  */
3047                 mcs_data.timeout_jiffies = mcs_data.wait_status;
3048                 mcs_compl->timestamp = 0;
3049         }
3050
3051 completion_fini:
3052         hl_wait_multi_cs_completion_fini(mcs_compl);
3053
3054 put_ctx:
3055         hl_ctx_put(ctx);
3056         kfree(fence_arr);
3057
3058 free_seq_arr:
3059         kfree(cs_seq_arr);
3060
3061         if (rc == -ERESTARTSYS) {
3062                 dev_err_ratelimited(hdev->dev,
3063                                 "user process got signal while waiting for Multi-CS\n");
3064                 rc = -EINTR;
3065         }
3066
3067         if (rc)
3068                 return rc;
3069
3070         /* update output args */
3071         memset(args, 0, sizeof(*args));
3072
3073         if (mcs_data.completion_bitmap) {
3074                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3075                 args->out.cs_completion_map = mcs_data.completion_bitmap;
3076
3077                 /* a non-zero timestamp is valid */
3078                 if (mcs_data.timestamp) {
3079                         args->out.timestamp_nsec = mcs_data.timestamp;
3080                         args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3081                 }
3082
3083                 /* update if some CS was gone */
3084                 if (!mcs_data.timestamp)
3085                         args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3086         } else {
3087                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
3088         }
3089
3090         return 0;
3091 }
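
/*
 * Illustrative user-space sketch (not part of the driver): a process waiting
 * for the first of several command submissions fills the input union with a
 * pointer to its sequence array and sets HL_WAIT_CS_FLAGS_MULTI_CS. The ioctl
 * request macro (HL_IOCTL_WAIT_CS) and the placeholders seq_a/seq_b are
 * assumptions; the field names match the uapi union handled above.
 *
 *      union hl_wait_cs_args args = {0};
 *      __u64 seqs[2] = { seq_a, seq_b };
 *
 *      args.in.seq = (__u64)(uintptr_t)seqs;
 *      args.in.seq_arr_len = 2;
 *      args.in.timeout_us = 1000000;
 *      args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
 *
 *      if (!ioctl(fd, HL_IOCTL_WAIT_CS, &args) &&
 *          args.out.status == HL_WAIT_CS_STATUS_COMPLETED)
 *              completed_mask = args.out.cs_completion_map;
 */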
3092
3093 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3094 {
3095         struct hl_device *hdev = hpriv->hdev;
3096         union hl_wait_cs_args *args = data;
3097         enum hl_cs_wait_status status;
3098         u64 seq = args->in.seq;
3099         s64 timestamp;
3100         int rc;
3101
3102         rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
3103
3104         if (rc == -ERESTARTSYS) {
3105                 dev_err_ratelimited(hdev->dev,
3106                         "user process got signal while waiting for CS handle %llu\n",
3107                         seq);
3108                 return -EINTR;
3109         }
3110
3111         memset(args, 0, sizeof(*args));
3112
3113         if (rc) {
3114                 if (rc == -ETIMEDOUT) {
3115                         dev_err_ratelimited(hdev->dev,
3116                                 "CS %llu has timed-out while user process is waiting for it\n",
3117                                 seq);
3118                         args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
3119                 } else if (rc == -EIO) {
3120                         dev_err_ratelimited(hdev->dev,
3121                                 "CS %llu has been aborted while user process is waiting for it\n",
3122                                 seq);
3123                         args->out.status = HL_WAIT_CS_STATUS_ABORTED;
3124                 }
3125                 return rc;
3126         }
3127
3128         if (timestamp) {
3129                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3130                 args->out.timestamp_nsec = timestamp;
3131         }
3132
3133         switch (status) {
3134         case CS_WAIT_STATUS_GONE:
3135                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3136                 fallthrough;
3137         case CS_WAIT_STATUS_COMPLETED:
3138                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3139                 break;
3140         case CS_WAIT_STATUS_BUSY:
3141         default:
3142                 args->out.status = HL_WAIT_CS_STATUS_BUSY;
3143                 break;
3144         }
3145
3146         return 0;
3147 }
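
/*
 * Illustrative sketch (not part of the driver): the mapping at the end of
 * hl_cs_wait_ioctl() reports a GONE fence as COMPLETED plus a dedicated flag,
 * so old sequence numbers are not treated as errors. The helper below is
 * hypothetical.
 */
static u32 __maybe_unused hl_wait_status_to_uapi_sketch(enum hl_cs_wait_status status,
                                                        u32 *flags)
{
        switch (status) {
        case CS_WAIT_STATUS_GONE:
                *flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
                fallthrough;
        case CS_WAIT_STATUS_COMPLETED:
                return HL_WAIT_CS_STATUS_COMPLETED;
        case CS_WAIT_STATUS_BUSY:
        default:
                return HL_WAIT_CS_STATUS_BUSY;
        }
}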
3148
3149 static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
3150                                         struct hl_cb *cq_cb,
3151                                         u64 ts_offset, u64 cq_offset, u64 target_value,
3152                                         spinlock_t *wait_list_lock,
3153                                         struct hl_user_pending_interrupt **pend)
3154 {
3155         struct hl_ts_buff *ts_buff = buf->private;
3156         struct hl_user_pending_interrupt *requested_offset_record =
3157                                 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3158                                 ts_offset;
3159         struct hl_user_pending_interrupt *cb_last =
3160                         (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3161                         (ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
3162         unsigned long flags, iter_counter = 0;
3163         u64 current_cq_counter;
3164         ktime_t timestamp;
3165
3166         /* Validate that ts_offset does not exceed the last valid record */
3167         if (requested_offset_record >= cb_last) {
3168                 dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
3169                                                                 (u64)(uintptr_t)cb_last);
3170                 return -EINVAL;
3171         }
3172
3173         timestamp = ktime_get();
3174
3175 start_over:
3176         spin_lock_irqsave(wait_list_lock, flags);
3177
3178         /* Unregister only if we didn't reach the target value, since in that
3179          * case there will be no handling in irq context and it is therefore
3180          * safe to delete the node from the interrupt list and re-use it for
3181          * another interrupt
3182          */
3183         if (requested_offset_record->ts_reg_info.in_use) {
3184                 current_cq_counter = *requested_offset_record->cq_kernel_addr;
3185                 if (current_cq_counter < requested_offset_record->cq_target_value) {
3186                         list_del(&requested_offset_record->wait_list_node);
3187                         spin_unlock_irqrestore(wait_list_lock, flags);
3188
3189                         hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
3190                         hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
3191
3192                         dev_dbg(buf->mmg->dev,
3193                                 "ts node removed from interrupt list, can now be re-used\n");
3194                 } else {
3195                         dev_dbg(buf->mmg->dev,
3196                                 "ts node in middle of irq handling\n");
3197
3198                         /* irq handling is in progress, give it time to finish */
3199                         spin_unlock_irqrestore(wait_list_lock, flags);
3200                         usleep_range(100, 1000);
3201                         if (++iter_counter == MAX_TS_ITER_NUM) {
3202                                 dev_err(buf->mmg->dev,
3203                                         "Timestamp offset processing reached timeout of %lld ms\n",
3204                                         ktime_ms_delta(ktime_get(), timestamp));
3205                                 return -EAGAIN;
3206                         }
3207
3208                         goto start_over;
3209                 }
3210         } else {
3211                 /* Fill up the new registration node info */
3212                 requested_offset_record->ts_reg_info.buf = buf;
3213                 requested_offset_record->ts_reg_info.cq_cb = cq_cb;
3214                 requested_offset_record->ts_reg_info.timestamp_kernel_addr =
3215                                 (u64 *) ts_buff->user_buff_address + ts_offset;
3216                 requested_offset_record->cq_kernel_addr =
3217                                 (u64 *) cq_cb->kernel_address + cq_offset;
3218                 requested_offset_record->cq_target_value = target_value;
3219
3220                 spin_unlock_irqrestore(wait_list_lock, flags);
3221         }
3222
3223         *pend = requested_offset_record;
3224
3225         dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
3226                 requested_offset_record);
3227         return 0;
3228 }
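
/*
 * Illustrative sketch (not part of the driver): the offset validation at the
 * top of ts_buff_get_kernel_ts_record() treats the timestamp buffer as an
 * array of records and rejects any offset at or beyond its last element.
 * A minimal form of the same check, with hypothetical names:
 */
static bool __maybe_unused hl_ts_offset_valid_sketch(void *kernel_buff, u64 buff_size,
                                                        u64 ts_offset)
{
        struct hl_user_pending_interrupt *first = kernel_buff;
        struct hl_user_pending_interrupt *last = first +
                        buff_size / sizeof(struct hl_user_pending_interrupt);

        return first + ts_offset < last;
}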
3229
3230 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3231                                 struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
3232                                 u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
3233                                 u64 target_value, struct hl_user_interrupt *interrupt,
3234                                 bool register_ts_record, u64 ts_handle, u64 ts_offset,
3235                                 u32 *status, u64 *timestamp)
3236 {
3237         struct hl_user_pending_interrupt *pend;
3238         struct hl_mmap_mem_buf *buf;
3239         struct hl_cb *cq_cb;
3240         unsigned long timeout, flags;
3241         long completion_rc;
3242         int rc = 0;
3243
3244         timeout = hl_usecs64_to_jiffies(timeout_us);
3245
3246         hl_ctx_get(ctx);
3247
3248         cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
3249         if (!cq_cb) {
3250                 rc = -EINVAL;
3251                 goto put_ctx;
3252         }
3253
3254         /* Validate the cq offset */
3255         if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
3256                         ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
3257                 rc = -EINVAL;
3258                 goto put_cq_cb;
3259         }
3260
3261         if (register_ts_record) {
3262                 dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
3263                                         interrupt->interrupt_id, ts_offset, cq_counters_offset);
3264                 buf = hl_mmap_mem_buf_get(mmg, ts_handle);
3265                 if (!buf) {
3266                         rc = -EINVAL;
3267                         goto put_cq_cb;
3268                 }
3269
3270                 /* get ts buffer record */
3271                 rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
3272                                                 cq_counters_offset, target_value,
3273                                                 &interrupt->wait_list_lock, &pend);
3274                 if (rc)
3275                         goto put_ts_buff;
3276         } else {
3277                 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3278                 if (!pend) {
3279                         rc = -ENOMEM;
3280                         goto put_cq_cb;
3281                 }
3282                 hl_fence_init(&pend->fence, ULONG_MAX);
3283                 pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset;
3284                 pend->cq_target_value = target_value;
3285         }
3286
3287         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3288
3289         /* We check for completion value as interrupt could have been received
3290          * before we added the node to the wait list
3291          */
3292         if (*pend->cq_kernel_addr >= target_value) {
3293                 if (register_ts_record)
3294                         pend->ts_reg_info.in_use = 0;
3295                 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3296
3297                 *status = HL_WAIT_CS_STATUS_COMPLETED;
3298
3299                 if (register_ts_record) {
3300                         *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
3301                         goto put_ts_buff;
3302                 } else {
3303                         pend->fence.timestamp = ktime_get();
3304                         goto set_timestamp;
3305                 }
3306         } else if (!timeout_us) {
3307                 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3308                 *status = HL_WAIT_CS_STATUS_BUSY;
3309                 pend->fence.timestamp = ktime_get();
3310                 goto set_timestamp;
3311         }
3312
3313         /* Add the pending user interrupt to the relevant list for the
3314          * interrupt handler to monitor.
3315          * Note that we cannot keep the list sorted by target value (which
3316          * would shorten the list-traversal loop), since the same list can
3317          * hold nodes for different cq counter handles.
3318          * Note:
3319          * Mark the ts buff offset as in-use here, inside the spinlock-
3320          * protected area, to avoid entering the re-use path in
3321          * ts_buff_get_kernel_ts_record before the node is added to the list.
3322          * That scenario can happen when multiple threads race on the same
3323          * offset: one thread sets up the ts buff in ts_buff_get_kernel_ts_record,
3324          * then another thread takes over, reaches ts_buff_get_kernel_ts_record
3325          * as well, and then tries to re-use the same ts buff offset, ending up
3326          * deleting a non-existing node from the list.
3327          */
3328         if (register_ts_record)
3329                 pend->ts_reg_info.in_use = 1;
3330
3331         list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3332         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3333
3334         if (register_ts_record) {
3335                 rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
3336                 goto ts_registration_exit;
3337         }
3338
3339         /* Wait for interrupt handler to signal completion */
3340         completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3341                                                                 timeout);
3342         if (completion_rc > 0) {
3343                 *status = HL_WAIT_CS_STATUS_COMPLETED;
3344         } else {
3345                 if (completion_rc == -ERESTARTSYS) {
3346                         dev_err_ratelimited(hdev->dev,
3347                                         "user process got signal while waiting for interrupt ID %d\n",
3348                                         interrupt->interrupt_id);
3349                         rc = -EINTR;
3350                         *status = HL_WAIT_CS_STATUS_ABORTED;
3351                 } else {
3352                         if (pend->fence.error == -EIO) {
3353                                 dev_err_ratelimited(hdev->dev,
3354                                                 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3355                                                 pend->fence.error);
3356                                 rc = -EIO;
3357                                 *status = HL_WAIT_CS_STATUS_ABORTED;
3358                         } else {
3359                                 /* The wait has timed-out. We don't know anything beyond that
3360                                  * because the workload wasn't submitted through the driver.
3361                                  * Therefore, from driver's perspective, the workload is still
3362                                  * executing.
3363                                  */
3364                                 rc = 0;
3365                                 *status = HL_WAIT_CS_STATUS_BUSY;
3366                         }
3367                 }
3368         }
3369
3370         /*
3371          * We remove the node from the list here, and not in the irq handler,
3372          * to cover the completion-timeout case. If it is a registration for a
3373          * ts record, the node is deleted in the irq handler once the target
3374          * value is reached.
3375          */
3376         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3377         list_del(&pend->wait_list_node);
3378         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3379
3380 set_timestamp:
3381         *timestamp = ktime_to_ns(pend->fence.timestamp);
3382         kfree(pend);
3383         hl_cb_put(cq_cb);
3384 ts_registration_exit:
3385         hl_ctx_put(ctx);
3386
3387         return rc;
3388
3389 put_ts_buff:
3390         hl_mmap_mem_buf_put(buf);
3391 put_cq_cb:
3392         hl_cb_put(cq_cb);
3393 put_ctx:
3394         hl_ctx_put(ctx);
3395
3396         return rc;
3397 }
3398
3399 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
3400                                 u64 timeout_us, u64 user_address,
3401                                 u64 target_value, struct hl_user_interrupt *interrupt,
3402                                 u32 *status,
3403                                 u64 *timestamp)
3404 {
3405         struct hl_user_pending_interrupt *pend;
3406         unsigned long timeout, flags;
3407         u64 completion_value;
3408         long completion_rc;
3409         int rc = 0;
3410
3411         timeout = hl_usecs64_to_jiffies(timeout_us);
3412
3413         hl_ctx_get(ctx);
3414
3415         pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3416         if (!pend) {
3417                 hl_ctx_put(ctx);
3418                 return -ENOMEM;
3419         }
3420
3421         hl_fence_init(&pend->fence, ULONG_MAX);
3422
3423         /* Add pending user interrupt to relevant list for the interrupt
3424          * handler to monitor
3425          */
3426         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3427         list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3428         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3429
3430         /* We check for completion value as interrupt could have been received
3431          * before we added the node to the wait list
3432          */
3433         if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3434                 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3435                 rc = -EFAULT;
3436                 goto remove_pending_user_interrupt;
3437         }
3438
3439         if (completion_value >= target_value) {
3440                 *status = HL_WAIT_CS_STATUS_COMPLETED;
3441                 /* There was no interrupt, we assume the completion is now. */
3442                 pend->fence.timestamp = ktime_get();
3443         } else {
3444                 *status = HL_WAIT_CS_STATUS_BUSY;
3445         }
3446
3447         if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
3448                 goto remove_pending_user_interrupt;
3449
3450 wait_again:
3451         /* Wait for interrupt handler to signal completion */
3452         completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3453                                                                                 timeout);
3454
3455         /* If the timeout did not expire, we need to perform the comparison.
3456          * If the comparison fails, keep waiting until the timeout expires
3457          */
3458         if (completion_rc > 0) {
3459                 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3460                 /* reinit_completion must be called before we check the user
3461                  * completion value; otherwise, if an interrupt is received after
3462                  * the comparison and before the next wait_for_completion,
3463                  * we will reach the timeout and fail
3464                  */
3465                 reinit_completion(&pend->fence.completion);
3466                 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3467
3468                 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3469                         dev_err(hdev->dev, "Failed to copy completion value from user\n");
3470                         rc = -EFAULT;
3471
3472                         goto remove_pending_user_interrupt;
3473                 }
3474
3475                 if (completion_value >= target_value) {
3476                         *status = HL_WAIT_CS_STATUS_COMPLETED;
3477                 } else if (pend->fence.error) {
3478                         dev_err_ratelimited(hdev->dev,
3479                                 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3480                                 pend->fence.error);
3481                         /* set the command completion status as ABORTED */
3482                         *status = HL_WAIT_CS_STATUS_ABORTED;
3483                 } else {
3484                         timeout = completion_rc;
3485                         goto wait_again;
3486                 }
3487         } else if (completion_rc == -ERESTARTSYS) {
3488                 dev_err_ratelimited(hdev->dev,
3489                         "user process got signal while waiting for interrupt ID %d\n",
3490                         interrupt->interrupt_id);
3491                 rc = -EINTR;
3492         } else {
3493                 /* The wait has timed-out. We don't know anything beyond that
3494                  * because the workload wasn't submitted through the driver.
3495                  * Therefore, from driver's perspective, the workload is still
3496                  * executing.
3497                  */
3498                 rc = 0;
3499                 *status = HL_WAIT_CS_STATUS_BUSY;
3500         }
3501
3502 remove_pending_user_interrupt:
3503         spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3504         list_del(&pend->wait_list_node);
3505         spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3506
3507         *timestamp = ktime_to_ns(pend->fence.timestamp);
3508
3509         kfree(pend);
3510         hl_ctx_put(ctx);
3511
3512         return rc;
3513 }
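
/*
 * Illustrative sketch (not part of the driver): the loop above follows a
 * "check, sleep, re-arm, re-check" pattern. The completion must be re-armed
 * with reinit_completion() before the value is read again, otherwise an
 * interrupt firing between the read and the next wait would be missed. The
 * helper below is hypothetical, works on a kernel pointer instead of a user
 * pointer, and returns -ETIMEDOUT where the ioctl reports BUSY instead.
 */
static int __maybe_unused hl_wait_for_value_sketch(struct completion *comp, u64 *value,
                                                u64 target, unsigned long timeout)
{
        long rc;

        while (READ_ONCE(*value) < target) {
                rc = wait_for_completion_interruptible_timeout(comp, timeout);
                if (rc < 0)
                        return rc;              /* signal, e.g. -ERESTARTSYS */
                if (!rc)
                        return -ETIMEDOUT;      /* timeout expired */
                reinit_completion(comp);        /* re-arm before re-reading the value */
                timeout = rc;                   /* keep only the remaining time */
        }

        return 0;
}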
3514
3515 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3516 {
3517         u16 interrupt_id, first_interrupt, last_interrupt;
3518         struct hl_device *hdev = hpriv->hdev;
3519         struct asic_fixed_properties *prop;
3520         struct hl_user_interrupt *interrupt;
3521         union hl_wait_cs_args *args = data;
3522         u32 status = HL_WAIT_CS_STATUS_BUSY;
3523         u64 timestamp = 0;
3524         int rc, int_idx;
3525
3526         prop = &hdev->asic_prop;
3527
3528         if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
3529                 dev_err(hdev->dev, "no user interrupts allowed\n");
3530                 return -EPERM;
3531         }
3532
3533         interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
3534
3535         first_interrupt = prop->first_available_user_interrupt;
3536         last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
3537
3538         if (interrupt_id < prop->user_dec_intr_count) {
3539
3540                 /* Check if the requested core is enabled */
3541                 if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
3542                         dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed\n",
3543                                 interrupt_id);
3544                         return -EINVAL;
3545                 }
3546
3547                 interrupt = &hdev->user_interrupt[interrupt_id];
3548
3549         } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
3550
3551                 int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
3552                 interrupt = &hdev->user_interrupt[int_idx];
3553
3554         } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
3555                 interrupt = &hdev->common_user_cq_interrupt;
3556         } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
3557                 interrupt = &hdev->common_decoder_interrupt;
3558         } else {
3559                 dev_err(hdev->dev, "invalid user interrupt %u\n", interrupt_id);
3560                 return -EINVAL;
3561         }
3562
3563         if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
3564                 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
3565                                 args->in.interrupt_timeout_us, args->in.cq_counters_handle,
3566                                 args->in.cq_counters_offset,
3567                                 args->in.target, interrupt,
3568                                 !!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT),
3569                                 args->in.timestamp_handle, args->in.timestamp_offset,
3570                                 &status, &timestamp);
3571         else
3572                 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
3573                                 args->in.interrupt_timeout_us, args->in.addr,
3574                                 args->in.target, interrupt, &status,
3575                                 &timestamp);
3576         if (rc)
3577                 return rc;
3578
3579         memset(args, 0, sizeof(*args));
3580         args->out.status = status;
3581
3582         if (timestamp) {
3583                 args->out.timestamp_nsec = timestamp;
3584                 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3585         }
3586
3587         return 0;
3588 }
3589
3590 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3591 {
3592         struct hl_device *hdev = hpriv->hdev;
3593         union hl_wait_cs_args *args = data;
3594         u32 flags = args->in.flags;
3595         int rc;
3596
3597         /* If the device is not operational, or if an error has happened and the user should
3598          * release the device, there is no point in waiting for any command submission or user interrupt.
3599          */
3600         if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
3601                 return -EBUSY;
3602
3603         if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
3604                 rc = hl_interrupt_wait_ioctl(hpriv, data);
3605         else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
3606                 rc = hl_multi_cs_wait_ioctl(hpriv, data);
3607         else
3608                 rc = hl_cs_wait_ioctl(hpriv, data);
3609
3610         return rc;
3611 }