// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
				HL_CS_FLAGS_COLLECTIVE_WAIT)

/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};
static void job_wq_completion(struct work_struct *work);
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
				u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);
static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);

	hdev->asic_funcs->reset_sob(hdev, hw_sob);

	hw_sob->need_reset = false;
}

void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
		hw_sob->q_idx, hw_sob->sob_id);
}

void hw_sob_put(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_put(&hw_sob->kref, hl_sob_reset);
}

static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_put(&hw_sob->kref, hl_sob_reset_error);
}

void hw_sob_get(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_get(&hw_sob->kref);
}
/**
 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
 * @sob_base: sob base id
 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
 * @mask: generated mask
 *
 * Return: 0 if given parameters are valid
 */
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
{
	int i;

	if (sob_mask == 0)
		return -EINVAL;

	if (sob_mask == 0x1) {
		*mask = ~(1 << (sob_base & 0x7));
	} else {
		/* find msb in order to verify sob range is valid */
		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
			if (BIT(i) & sob_mask)
				break;

		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
			return -EINVAL;

		*mask = ~sob_mask;
	}

	return 0;
}
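/* Added commentary (not part of the original source): the generated mask is
 * the bitwise inverse of the participating SOB bits inside the 8-SOB monitor
 * window. Illustrative example: sob_base = 10 gives (10 & 0x7) == 2, so for
 * sob_mask == 0x1 the result is ~(1 << 2) == 0xfb, i.e. only the monitored
 * SOB's bit is cleared. As implied by the inversion above, zero bits appear
 * to mark the SOBs that take part in the monitor arm.
 */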
static void hl_fence_release(struct kref *kref)
{
	struct hl_fence *fence =
		container_of(kref, struct hl_fence, refcount);
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);

	kfree(hl_cs_cmpl);
}

void hl_fence_put(struct hl_fence *fence)
{
	if (IS_ERR_OR_NULL(fence))
		return;
	kref_put(&fence->refcount, hl_fence_release);
}

void hl_fences_put(struct hl_fence **fence, int len)
{
	int i;

	for (i = 0; i < len; i++, fence++)
		hl_fence_put(*fence);
}

void hl_fence_get(struct hl_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
}

static void hl_fence_init(struct hl_fence *fence, u64 sequence)
{
	kref_init(&fence->refcount);
	fence->cs_sequence = sequence;
	fence->error = 0;
	fence->timestamp = ktime_set(0, 0);
	init_completion(&fence->completion);
}
void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}

static void cs_job_do_release(struct kref *ref)
{
	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);

	kfree(job);
}

static void cs_job_put(struct hl_cs_job *job)
{
	kref_put(&job->refcount, cs_job_do_release);
}

bool cs_needs_completion(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the last CS in sequence should
	 * get a completion, any non staged CS will always get a completion
	 */
	if (cs->staged_cs && !cs->staged_last)
		return false;

	return true;
}

bool cs_needs_timeout(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the first CS in sequence should
	 * get a timeout, any non staged CS will always get a timeout
	 */
	if (cs->staged_cs && !cs->staged_first)
		return false;

	return true;
}

static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/*
	 * Patched CB is created for external queues jobs, and for H/W queues
	 * jobs if the user CB was allocated by driver and MMU is disabled.
	 */
	return (job->queue_type == QUEUE_TYPE_EXT ||
			(job->queue_type == QUEUE_TYPE_HW &&
				job->is_kernel_allocated_cb &&
				!hdev->mmu_enable));
}
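/* Summary of the patching rules encoded above (added commentary, derived
 * from the code):
 * - QUEUE_TYPE_EXT: always patched, the driver never hands the user CB to
 *   the hardware as-is.
 * - QUEUE_TYPE_HW: patched only when the CB was allocated by the driver and
 *   the MMU is disabled; otherwise the CB is sent unmodified.
 * - QUEUE_TYPE_INT: never patched.
 */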
/*
 * cs_parser - parse the user command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @job	: pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;
	parser.completion = cs_needs_completion(job->cs);

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;
			job->contains_dma_pkt = parser.contains_dma_pkt;
			atomic_inc(&job->patched_cb->cs_cnt);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}
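/* Note on reference counting (added commentary): for patched flows,
 * cs_parser() consumes the caller's reference on the user CB - on both
 * success and failure it drops user_cb's cs_cnt and puts the CB - and on
 * success it takes a new cs_cnt reference on the patched CB instead, which
 * complete_job() releases later.
 */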
static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			atomic_dec(&job->patched_cb->cs_cnt);
			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here.
	 * This is also true for INT queues jobs which were allocated by driver
	 */
	if (job->is_kernel_allocated_cb &&
		((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
			job->queue_type == QUEUE_TYPE_INT)) {
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	/* We decrement reference only for a CS that gets completion
	 * because the reference was incremented only for this kind of CS
	 * right before it was scheduled.
	 *
	 * In staged submission, only the last CS marked as 'staged_last'
	 * gets completion, hence its release function will be called from here.
	 * As for all the rest CS's in the staged submission which do not get
	 * completion, their CS reference will be decremented by the
	 * 'staged_last' CS during the CS release flow.
	 * All relevant PQ CI counters will be incremented during the CS release
	 * flow by calling 'hl_hw_queue_update_ci'.
	 */
	if (cs_needs_completion(cs) &&
		(job->queue_type == QUEUE_TYPE_EXT ||
			job->queue_type == QUEUE_TYPE_HW))
		cs_put(cs);

	cs_job_put(job);
}
/*
 * hl_staged_cs_find_first - locate the first CS in this staged submission
 *
 * @hdev: pointer to device structure
 * @cs_seq: staged submission sequence number
 *
 * @note: This function must be called under 'hdev->cs_mirror_lock'
 *
 * Find and return a CS pointer with the given sequence
 */
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
{
	struct hl_cs *cs;

	list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
		if (cs->staged_cs && cs->staged_first &&
				cs->sequence == cs_seq)
			return cs;

	return NULL;
}

/*
 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
 *
 * @hdev: pointer to device structure
 * @cs: staged submission member
 *
 */
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs *last_entry;

	last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
							staged_cs_node);

	if (last_entry->staged_last)
		return true;

	return false;
}

/*
 * staged_cs_get - get CS reference if this CS is a part of a staged CS
 *
 * @hdev: pointer to device structure
 * @cs: current CS
 * @cs_seq: staged submission sequence number
 *
 * Increment CS reference for every CS in this staged submission except for
 * the CS which gets completion.
 */
static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
	/* Only the last CS in this staged submission will get a completion.
	 * We must increment the reference for all other CS's in this
	 * staged submission.
	 * Once we get a completion we will release the whole staged submission.
	 */
	if (!cs->staged_last)
		cs_get(cs);
}

/*
 * staged_cs_put - put a CS in case it is part of staged submission
 *
 * @hdev: pointer to device structure
 * @cs: CS to put
 *
 * This function decrements a CS reference (for a non completion CS)
 */
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
	/* We release all CS's in a staged submission except the last
	 * CS which we have never incremented its reference.
	 */
	if (!cs_needs_completion(cs))
		cs_put(cs);
}
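/* Staged-submission refcount scheme in short (added commentary): every CS in
 * the sequence except the one marked 'staged_last' takes an extra reference
 * in staged_cs_get(). When the 'staged_last' CS completes, cs_do_release()
 * walks the staged list and drops those references via staged_cs_put(), so
 * the whole sequence is released together.
 */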
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
	bool next_entry_found = false;
	struct hl_cs *next;

	if (!cs_needs_timeout(cs))
		return;

	spin_lock(&hdev->cs_mirror_lock);

	/* We need to handle tdr only once for the complete staged submission.
	 * Hence, we choose the CS that reaches this function first which is
	 * the CS marked as 'staged_last'.
	 */
	if (cs->staged_cs && cs->staged_last)
		cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);

	spin_unlock(&hdev->cs_mirror_lock);

	/* Don't cancel TDR in case this CS was timedout because we might be
	 * running from the TDR context
	 */
	if (cs && (cs->timedout ||
			hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
		return;

	if (cs && cs->tdr_active)
		cancel_delayed_work_sync(&cs->work_tdr);

	spin_lock(&hdev->cs_mirror_lock);

	/* queue TDR for next CS */
	list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
		if (cs_needs_timeout(next)) {
			next_entry_found = true;
			break;
		}

	if (next_entry_found && !next->tdr_active) {
		next->tdr_active = true;
		schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
	}

	spin_unlock(&hdev->cs_mirror_lock);
}
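/* Added commentary: TDR here stands for timeout detection and recovery.
 * Although every CS owns a work_tdr delayed work, only one is armed at a
 * time: when a CS completes, its work is cancelled above and the timer is
 * re-armed on the next CS in the mirror list that still needs a timeout,
 * using that CS's own timeout_jiffies.
 */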
/*
 * force_complete_multi_cs - complete all contexts that wait on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 */
static void force_complete_multi_cs(struct hl_device *hdev)
{
	int i;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];

		spin_lock(&mcs_compl->lock);

		if (!mcs_compl->used) {
			spin_unlock(&mcs_compl->lock);
			continue;
		}

		/* when calling force complete no context should be waiting on
		 * multi-CS.
		 * We are calling the function as a protection for such case
		 * to free any pending context and print error message
		 */
		dev_err(hdev->dev,
			"multi-CS completion context %d still waiting when calling force completion\n",
			i);
		complete_all(&mcs_compl->completion);
		spin_unlock(&mcs_compl->lock);
	}
}

/*
 * complete_multi_cs - complete all waiting entities on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 * @cs: CS structure
 *
 * The function signals a waiting entity that has overlapping stream masters
 * with the completed CS.
 * For example:
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
 *   common stream master QID
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 4. send signal as stream
 *   master QID 4 is common
 */
static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_fence *fence = cs->fence;
	int i;

	/* in case of multi CS check for completion only for the first CS */
	if (cs->staged_cs && !cs->staged_first)
		return;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];
		if (!mcs_compl->used)
			continue;

		spin_lock(&mcs_compl->lock);

		/*
		 * complete if:
		 * 1. still waiting for completion
		 * 2. the completed CS has at least one overlapping stream
		 *    master with the stream masters in the completion
		 */
		if (mcs_compl->used &&
				(fence->stream_master_qid_map &
					mcs_compl->stream_master_qid_map)) {
			/* extract the timestamp only of first completed CS */
			if (!mcs_compl->timestamp)
				mcs_compl->timestamp =
					ktime_to_ns(fence->timestamp);
			complete_all(&mcs_compl->completion);
		}

		spin_unlock(&mcs_compl->lock);
	}
}
static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
					struct hl_cs *cs,
					struct hl_cs_compl *hl_cs_cmpl)
{
	/* Skip this handler if the cs wasn't submitted, to avoid putting
	 * the hw_sob twice, since this case already handled at this point,
	 * also skip if the hw_sob pointer wasn't set.
	 */
	if (!hl_cs_cmpl->hw_sob || !cs->submitted)
		return;

	spin_lock(&hl_cs_cmpl->lock);

	/*
	 * we get refcount upon reservation of signals or signal/wait cs for the
	 * hw_sob object, and need to put it when the first staged cs
	 * (which contains the encaps signals) or cs signal/wait is completed.
	 */
	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
			(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
			(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
			(!!hl_cs_cmpl->encaps_signals)) {
		dev_dbg(hdev->dev,
			"CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
			hl_cs_cmpl->cs_seq,
			hl_cs_cmpl->type,
			hl_cs_cmpl->hw_sob->sob_id,
			hl_cs_cmpl->sob_val);

		hw_sob_put(hl_cs_cmpl->hw_sob);

		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
			hdev->asic_funcs->reset_sob_group(hdev,
					hl_cs_cmpl->sob_group);
	}

	spin_unlock(&hl_cs_cmpl->lock);
}
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(cs->fence, struct hl_cs_compl, base_fence);

	cs->completed = true;

	/*
	 * Although if we reached here it means that all external jobs have
	 * finished, because each one of them took refcnt to CS, we still
	 * need to go over the internal jobs and complete them. Otherwise, we
	 * will have leaked memory and what's worse, the CS object (and
	 * potentially the CTX object) could be released, while the JOB
	 * still holds a pointer to them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);

	if (!cs->submitted) {
		/*
		 * In case the wait for signal CS was submitted, the fence put
		 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
		 * right before hanging on the PQ.
		 */
		if (cs->type == CS_TYPE_WAIT ||
				cs->type == CS_TYPE_COLLECTIVE_WAIT)
			hl_fence_put(cs->signal_fence);

		goto out;
	}

	/* Need to update CI for all queue jobs that do not get completion */
	hl_hw_queue_update_ci(cs);

	/* remove CS from CS mirror list */
	spin_lock(&hdev->cs_mirror_lock);
	list_del_init(&cs->mirror_node);
	spin_unlock(&hdev->cs_mirror_lock);

	cs_handle_tdr(hdev, cs);

	if (cs->staged_cs) {
		/* the completion CS decrements reference for the entire
		 * staged submission
		 */
		if (cs->staged_last) {
			struct hl_cs *staged_cs, *tmp;

			list_for_each_entry_safe(staged_cs, tmp,
					&cs->staged_cs_node, staged_cs_node)
				staged_cs_put(hdev, staged_cs);
		}

		/* A staged CS will be a member in the list only after it
		 * was submitted. We used 'cs_mirror_lock' when inserting
		 * it to list so we will use it again when removing it
		 */
		if (cs->submitted) {
			spin_lock(&hdev->cs_mirror_lock);
			list_del(&cs->staged_cs_node);
			spin_unlock(&hdev->cs_mirror_lock);
		}

		/* decrement refcount to handle when first staged cs
		 * with encaps signals is completed.
		 */
		if (hl_cs_cmpl->encaps_signals)
			kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
					hl_encaps_handle_do_release);
	}

	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
			&& cs->encaps_signals)
		kref_put(&cs->encaps_sig_hdl->refcount,
				hl_encaps_handle_do_release);

out:
	/* Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	/* We need to mark an error for not submitted because in that case
	 * the hl fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
		cs->fence->error = -ETIMEDOUT;
	else if (cs->aborted)
		cs->fence->error = -EIO;
	else if (!cs->submitted)
		cs->fence->error = -EBUSY;

	if (unlikely(cs->skip_reset_on_timeout)) {
		dev_err(hdev->dev,
			"Command submission %llu completed after %llu (s)\n",
			cs->sequence,
			div_u64(jiffies - cs->submission_time_jiffies, HZ));
	}

	if (cs->timestamp)
		cs->fence->timestamp = ktime_get();
	complete_all(&cs->fence->completion);
	complete_multi_cs(hdev, cs);

	cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);

	hl_fence_put(cs->fence);

	kfree(cs->jobs_in_queue_cnt);
	kfree(cs);
}
static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						work_tdr.work);
	bool skip_reset_on_timeout = cs->skip_reset_on_timeout;

	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark the CS is timed out so we won't try to cancel its TDR */
	if (likely(!skip_reset_on_timeout))
		cs->timedout = true;

	hdev = cs->ctx->hdev;

	switch (cs->type) {
	case CS_TYPE_SIGNAL:
		dev_err(hdev->dev,
			"Signal command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_WAIT:
		dev_err(hdev->dev,
			"Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_COLLECTIVE_WAIT:
		dev_err(hdev->dev,
			"Collective Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	default:
		dev_err(hdev->dev,
			"Command submission %llu has not finished in time!\n",
			cs->sequence);
		break;
	}

	rc = hl_state_dump(hdev);
	if (rc)
		dev_err(hdev->dev, "Error during system state dump %d\n", rc);

	cs_put(cs);

	if (likely(!skip_reset_on_timeout)) {
		if (hdev->reset_on_lockup)
			hl_device_reset(hdev, HL_RESET_TDR);
		else
			hdev->needs_reset = true;
	}
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, u64 user_sequence,
			struct hl_cs **cs_new, u32 flags, u32 timeout)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_fence *other = NULL;
	struct hl_cs_compl *cs_cmpl;
	struct hl_cs *cs;
	int rc;

	cntr = &hdev->aggregated_cs_counters;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);

	if (!cs) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, ctx);

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	cs->type = cs_type;
	cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
	cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
	cs->timeout_jiffies = timeout;
	cs->skip_reset_on_timeout =
		hdev->skip_reset_on_timeout ||
		!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
	cs->submission_time_jiffies = jiffies;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl)
		cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);

	if (!cs_cmpl) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs;
	}

	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
	if (!cs->jobs_in_queue_cnt)
		cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
				sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);

	if (!cs->jobs_in_queue_cnt) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs_cmpl;
	}

	cs_cmpl->hdev = hdev;
	cs_cmpl->type = cs->type;
	spin_lock_init(&cs_cmpl->lock);
	cs->fence = &cs_cmpl->base_fence;

	spin_lock(&ctx->cs_lock);

	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];

	if (other && !completion_done(&other->completion)) {
		/* If the following statement is true, it means we have reached
		 * a point in which only part of the staged submission was
		 * submitted and we don't have enough room in the 'cs_pending'
		 * array for the rest of the submission.
		 * This causes a deadlock because this CS will never be
		 * completed as it depends on future CS's for completion.
		 */
		if (other->cs_sequence == user_sequence)
			dev_crit_ratelimited(hdev->dev,
				"Staged CS %llu deadlock due to lack of resources",
				user_sequence);

		dev_dbg_ratelimited(hdev->dev,
			"Rejecting CS because of too many in-flights CS\n");
		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
		rc = -EAGAIN;
		goto free_fence;
	}

	/* init hl_fence */
	hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);

	cs->sequence = cs_cmpl->cs_seq;

	ctx->cs_pending[cs_cmpl->cs_seq &
			(hdev->asic_prop.max_pending_cs - 1)] =
						&cs_cmpl->base_fence;
	ctx->cs_sequence++;

	hl_fence_get(&cs_cmpl->base_fence);

	hl_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	spin_unlock(&ctx->cs_lock);
	kfree(cs->jobs_in_queue_cnt);
free_cs_cmpl:
	kfree(cs_cmpl);
free_cs:
	kfree(cs);
	hl_ctx_put(ctx);
	return rc;
}
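/* Added commentary: 'cs_pending' acts as a ring of fences indexed by
 * 'sequence & (max_pending_cs - 1)', which requires max_pending_cs to be a
 * power of two. A slot can only be recycled once its previous fence has
 * completed; otherwise allocate_cs() returns -EAGAIN and user-space is
 * expected to retry the submission.
 */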
static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	staged_cs_put(hdev, cs);

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);
}

void hl_cs_rollback_all(struct hl_device *hdev)
{
	int i;
	struct hl_cs *cs, *tmp;

	flush_workqueue(hdev->sob_reset_wq);

	/* flush all completions before iterating over the CS mirror list in
	 * order to avoid a race with the release functions
	 */
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		flush_workqueue(hdev->cq_wq[i]);

	/* Make sure we don't have leftovers in the CS mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
				cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}

	force_complete_multi_cs(hdev);
}
static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
	struct hl_user_pending_interrupt *pend;
	unsigned long flags;

	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
	list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
		pend->fence.error = -EIO;
		complete_all(&pend->fence.completion);
	}
	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
}

void hl_release_pending_user_interrupts(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_user_interrupt *interrupt;
	int i;

	if (!prop->user_interrupt_count)
		return;

	/* We iterate through the user interrupt requests and wake up all
	 * user threads waiting for interrupt completion. We iterate the
	 * list under a lock, this is why all user threads, once awake,
	 * will wait on the same lock and will release the waiting object upon
	 * unlock.
	 */

	for (i = 0 ; i < prop->user_interrupt_count ; i++) {
		interrupt = &hdev->user_interrupt[i];
		wake_pending_user_interrupt_threads(interrupt);
	}

	interrupt = &hdev->common_user_interrupt;
	wake_pending_user_interrupt_threads(interrupt);
}

static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	complete_job(hdev, job);
}
static int validate_queue_index(struct hl_device *hdev,
				struct hl_cs_chunk *chunk,
				enum hl_queue_type *queue_type,
				bool *is_kernel_allocated_cb)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;

	/* This must be checked here to prevent out-of-bounds access to
	 * hw_queues_props array
	 */
	if (chunk->queue_index >= asic->max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->driver_only) {
		dev_err(hdev->dev,
			"Queue index %d is restricted for the kernel driver\n",
			chunk->queue_index);
		return -EINVAL;
	}

	/* When hw queue type isn't QUEUE_TYPE_HW,
	 * USER_ALLOC_CB flag shall be treated as "don't care".
	 */
	if (hw_queue_prop->type == QUEUE_TYPE_HW) {
		if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
			if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support user CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = false;
		} else {
			if (!(hw_queue_prop->cb_alloc_flags &
					CB_ALLOC_KERNEL)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support kernel CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = true;
		}
	} else {
		*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
						& CB_ALLOC_KERNEL);
	}

	*queue_type = hw_queue_prop->type;
	return 0;
}
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
					struct hl_cb_mgr *cb_mgr,
					struct hl_cs_chunk *chunk)
{
	struct hl_cb *cb;
	u32 cb_handle;

	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	atomic_inc(&cb->cs_cnt);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}

struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return NULL;

	kref_init(&job->refcount);
	job->queue_type = queue_type;
	job->is_kernel_allocated_cb = is_kernel_allocated_cb;

	if (is_cb_patched(hdev, job))
		INIT_LIST_HEAD(&job->userptr_list);

	if (job->queue_type == QUEUE_TYPE_EXT)
		INIT_WORK(&job->finish_work, job_wq_completion);

	return job;
}
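/* Added commentary: the GFP_ATOMIC-then-GFP_KERNEL fallback used here (and
 * in allocate_cs() and hl_cs_copy_chunk_array()) first tries an allocation
 * that cannot sleep and, only if the atomic pools are exhausted, falls back
 * to a sleeping allocation. This keeps the common submission path fast
 * without failing submissions under memory pressure.
 */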
static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
{
	if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
		return CS_TYPE_SIGNAL;
	else if (cs_type_flags & HL_CS_FLAGS_WAIT)
		return CS_TYPE_WAIT;
	else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
		return CS_TYPE_COLLECTIVE_WAIT;
	else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
		return CS_RESERVE_SIGNALS;
	else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
		return CS_UNRESERVE_SIGNALS;
	else
		return CS_TYPE_DEFAULT;
}
static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u32 cs_type_flags, num_chunks;
	enum hl_device_status status;
	enum hl_cs_type cs_type;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			hdev->status[status]);
		return -EBUSY;
	}

	if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!hdev->supports_staged_submission) {
		dev_err(hdev->dev, "staged submission not supported");
		return -EPERM;
	}

	cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;

	if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
		dev_err(hdev->dev,
			"CS type flags are mutually exclusive, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	cs_type = hl_cs_get_cs_type(cs_type_flags);
	num_chunks = args->in.num_chunks_execute;

	if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
					!hdev->supports_sync_stream)) {
		dev_err(hdev->dev, "Sync stream CS is not supported\n");
		return -EINVAL;
	}

	if (cs_type == CS_TYPE_DEFAULT) {
		if (!num_chunks) {
			dev_err(hdev->dev,
				"Got execute CS with 0 chunks, context %d\n",
				ctx->asid);
			return -EINVAL;
		}
	} else if (num_chunks != 1) {
		dev_err(hdev->dev,
			"Sync stream CS mandates one chunk only, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	return 0;
}
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
					struct hl_cs_chunk **cs_chunk_array,
					void __user *chunks, u32 num_chunks,
					struct hl_ctx *ctx)
{
	u32 size_to_copy;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		return -EINVAL;
	}

	*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
					GFP_ATOMIC);
	if (!*cs_chunk_array)
		*cs_chunk_array = kmalloc_array(num_chunks,
					sizeof(**cs_chunk_array), GFP_KERNEL);
	if (!*cs_chunk_array) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		kfree(*cs_chunk_array);
		return -EFAULT;
	}

	return 0;
}
static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
				u64 sequence, u32 flags,
				u32 encaps_signal_handle)
{
	if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
		return 0;

	cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
	cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);

	if (cs->staged_first) {
		/* Staged CS sequence is the first CS sequence */
		INIT_LIST_HEAD(&cs->staged_cs_node);
		cs->staged_sequence = cs->sequence;

		if (cs->encaps_signals)
			cs->encaps_sig_hdl_id = encaps_signal_handle;
	} else {
		/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
		 * under the cs_mirror_lock
		 */
		cs->staged_sequence = sequence;
	}

	/* Increment CS reference if needed */
	staged_cs_get(hdev, cs);

	cs->staged_cs = true;

	return 0;
}

static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
{
	int i;

	for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
		if (qid == hdev->stream_master_qid_arr[i])
			return BIT(i);

	return 0;
}
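/* Added commentary: each stream master QID is translated into a per-device
 * bit position, so the full set of masters a CS touches fits into the small
 * stream_master_qid_map carried by its fence. complete_multi_cs() later
 * tests that map against a waiter's map with a single bitwise AND.
 */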
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
				u32 num_chunks, u64 *cs_seq, u32 flags,
				u32 encaps_signals_handle, u32 timeout)
{
	bool staged_mid, int_queues_only = true;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_counters_atomic *cntr;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	u64 user_sequence;
	u8 stream_master_qid_map = 0;
	int rc, i;

	cntr = &hdev->aggregated_cs_counters;
	user_sequence = *cs_seq;
	*cs_seq = ULLONG_MAX;

	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
			hpriv->ctx);
	if (rc)
		goto out;

	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
		staged_mid = true;
	else
		staged_mid = false;

	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
			staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
			timeout);
	if (rc)
		goto free_cs_chunk_array;

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	rc = cs_staged_submission(hdev, cs, user_sequence, flags,
						encaps_signals_handle);
	if (rc)
		goto free_cs_object;

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
						&is_kernel_allocated_cb);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			goto free_cs_object;
		}

		if (is_kernel_allocated_cb) {
			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
			if (!cb) {
				atomic64_inc(
					&ctx->cs_counters.validation_drop_cnt);
				atomic64_inc(&cntr->validation_drop_cnt);
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT ||
				queue_type == QUEUE_TYPE_HW) {
			int_queues_only = false;

			/*
			 * store which streams are being used for external/HW
			 * queues of this CS
			 */
			if (hdev->supports_wait_for_multi_cs)
				stream_master_qid_map |=
					get_stream_master_qid_mask(hdev,
							chunk->queue_index);
		}

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
			atomic64_inc(&cntr->out_of_mem_drop_cnt);
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;

			goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to user and free all its resources
		 * Only increment for JOB on external or H/W queues, because
		 * only for those JOBs we get completion
		 */
		if (cs_needs_completion(cs) &&
			(job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW))
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
			atomic64_inc(&cntr->parsing_drop_cnt);
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	/* We allow a CS with any queue type combination as long as it does
	 * not get a completion
	 */
	if (int_queues_only && cs_needs_completion(cs)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev,
			"Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	/*
	 * store the (external/HW queues) streams used by the CS in the
	 * fence object for multi-CS completion
	 */
	if (hdev->supports_wait_for_multi_cs)
		cs->fence->stream_master_qid_map = stream_master_qid_map;

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	atomic_dec(&cb->cs_cnt);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
				u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	bool need_soft_reset = false;
	int rc = 0, do_ctx_switch;
	void __user *chunks;
	u32 num_chunks, tmp;
	int ret;

	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timedout, or if the device is not IDLE
				 * while we want to do context-switch (-EBUSY),
				 * we need to soft-reset because QMAN is
				 * probably stuck. However, we can't call to
				 * reset here directly because of deadlock, so
				 * need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks = args->in.num_chunks_restore;

		if (!num_chunks) {
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
					cs_seq, 0, 0, hdev->timeout_jiffies);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks) {
			enum hl_cs_wait_status status;
wait_again:
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					*cs_seq, &status, NULL);
			if (ret) {
				if (ret == -ERESTARTSYS) {
					usleep_range(100, 200);
					goto wait_again;
				}

				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %d\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		ctx->thread_ctx_switch_wait_token = 1;

	} else if (!ctx->thread_ctx_switch_wait_token) {
		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

out:
	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
		hl_device_reset(hdev, 0);

	return rc;
}
/*
 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
 * if the SOB value reaches the max value move to the other SOB reserved
 * to the queue.
 * @hdev: pointer to device structure
 * @q_idx: stream queue index
 * @hw_sob: the H/W SOB used in this signal CS.
 * @count: signals count
 * @encaps_sig: tells whether it's reservation for encaps signals or not.
 *
 * Note that this function must be called while hw_queues_lock is taken.
 */
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
{
	struct hl_sync_stream_properties *prop;
	struct hl_hw_sob *sob = *hw_sob, *other_sob;
	u8 other_sob_offset;

	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	hw_sob_get(sob);

	/* check for wraparound */
	if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
		/*
		 * Decrement as we reached the max value.
		 * The release function won't be called here as we've
		 * just incremented the refcount right before calling this
		 * function.
		 */
		hw_sob_put_err(sob);

		/*
		 * check the other sob value; if it is still in use then fail,
		 * otherwise make the switch
		 */
		other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
		other_sob = &prop->hw_sob[other_sob_offset];

		if (kref_read(&other_sob->kref) != 1) {
			dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
								q_idx);
			return -EINVAL;
		}

		/*
		 * next_sob_val always points to the next available signal
		 * in the sob, so in encaps signals it will be the next one
		 * after reserving the required amount.
		 */
		if (encaps_sig)
			prop->next_sob_val = count + 1;
		else
			prop->next_sob_val = count;

		/* only two SOBs are currently in use */
		prop->curr_sob_offset = other_sob_offset;
		*hw_sob = other_sob;

		/*
		 * check if other_sob needs reset, then do it before using it
		 * for the reservation or the next signal cs.
		 * we do it here, and for both encaps and regular signal cs
		 * cases in order to avoid possible races of two kref_put
		 * of the sob which can occur at the same time if we move the
		 * sob reset (kref_put) to cs_do_release function.
		 * in addition, if we have combination of cs signal and
		 * encaps, and at the point we need to reset the sob there was
		 * no more reservations and only signal cs keep coming,
		 * in such case we need signal_cs to put the refcount and
		 * reset the sob.
		 */
		if (other_sob->need_reset)
			hw_sob_put(other_sob);

		if (encaps_sig) {
			/* set reset indication for the sob */
			sob->need_reset = true;
			hw_sob_get(other_sob);
		}

		dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
				prop->curr_sob_offset, q_idx);
	} else {
		prop->next_sob_val += count;
	}

	return 0;
}
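/* Added commentary with an illustrative example: assume next_sob_val is
 * close to HL_MAX_SOB_VAL and a signal CS arrives with count = 4. Since
 * adding 4 would overflow the SOB counter, the handler switches to the
 * alternate reserved SOB of the stream (HL_RSVD_SOBS per queue, only two in
 * use), restarts the count there, and marks the old SOB for reset once its
 * last user drops the refcount.
 */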
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
		struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
		bool encaps_signals)
{
	u64 *signal_seq_arr = NULL;
	u32 size_to_copy, signal_seq_arr_len;
	int rc = 0;

	if (encaps_signals) {
		*signal_seq = chunk->encaps_signal_seq;
		return 0;
	}

	signal_seq_arr_len = chunk->num_signal_seq_arr;

	/* currently only one signal seq is supported */
	if (signal_seq_arr_len != 1) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Wait for signal CS supports only one signal CS seq\n");
		return -EINVAL;
	}

	signal_seq_arr = kmalloc_array(signal_seq_arr_len,
					sizeof(*signal_seq_arr),
					GFP_ATOMIC);
	if (!signal_seq_arr)
		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
						sizeof(*signal_seq_arr),
						GFP_KERNEL);
	if (!signal_seq_arr) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
	if (copy_from_user(signal_seq_arr,
				u64_to_user_ptr(chunk->signal_seq_arr),
				size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Failed to copy signal seq array from user\n");
		rc = -EFAULT;
		goto out;
	}

	/* currently it is guaranteed to have only one signal seq */
	*signal_seq = signal_seq_arr[0];

out:
	kfree(signal_seq_arr);

	return rc;
}
static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
		struct hl_ctx *ctx, struct hl_cs *cs,
		enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_cs_job *job;
	struct hl_cb *cb;
	u32 cb_size;

	cntr = &hdev->aggregated_cs_counters;

	job = hl_cs_allocate_job(hdev, q_type, true);
	if (!job) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		return -ENOMEM;
	}

	if (cs->type == CS_TYPE_WAIT)
		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
	else
		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);

	cb = hl_cb_kernel_create(hdev, cb_size,
			q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
	if (!cb) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		kfree(job);
		return -EFAULT;
	}

	job->id = 0;
	job->cs = cs;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = q_idx;

	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
			&& cs->encaps_signals)
		job->encaps_sig_wait_offset = encaps_signal_offset;

	/*
	 * No need for parsing, user CB is the patched CB.
	 * We call hl_cb_destroy() out of two reasons - we don't need the CB in
	 * the CB idr anymore and to decrement its refcount as it was
	 * incremented inside hl_cb_kernel_create().
	 */
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size;
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	/* increment refcount as for external queues we get completion */
	cs_get(cs);

	cs->jobs_in_queue_cnt[job->hw_queue_id]++;

	list_add_tail(&job->cs_node, &cs->job_list);

	hl_debugfs_add_job(hdev, job);

	return 0;
}
static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
				u32 q_idx, u32 count,
				u32 *handle_id, u32 *sob_addr,
				u32 *signals_count)
{
	struct hw_queue_properties *hw_queue_prop;
	struct hl_sync_stream_properties *prop;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_encaps_sig_handle *handle;
	struct hl_encaps_signals_mgr *mgr;
	struct hl_hw_sob *hw_sob;
	int hdl_id;
	int rc = 0;

	if (count >= HL_MAX_SOB_VAL) {
		dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
						count);
		rc = -EINVAL;
		goto out;
	}

	if (q_idx >= hdev->asic_prop.max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			q_idx);
		rc = -EINVAL;
		goto out;
	}

	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];

	if (!hw_queue_prop->supports_sync_stream) {
		dev_err(hdev->dev,
			"Queue index %d does not support sync stream operations\n",
			q_idx);
		rc = -EINVAL;
		goto out;
	}

	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		rc = -ENOMEM;
		goto out;
	}

	handle->count = count;
	mgr = &hpriv->ctx->sig_mgr;

	spin_lock(&mgr->lock);
	hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->lock);

	if (hdl_id < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
		rc = -EINVAL;
		kfree(handle);
		goto out;
	}

	handle->id = hdl_id;
	handle->q_idx = q_idx;
	handle->hdev = hdev;
	kref_init(&handle->refcount);

	hdev->asic_funcs->hw_queues_lock(hdev);

	hw_sob = &prop->hw_sob[prop->curr_sob_offset];

	/*
	 * Increment the SOB value by count by user request
	 * to reserve those signals
	 * check if the signals amount to reserve is not exceeding the max sob
	 * value, if yes then switch sob.
	 */
	rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
								true);
	if (rc) {
		dev_err(hdev->dev, "Failed to switch SOB\n");
		hdev->asic_funcs->hw_queues_unlock(hdev);
		rc = -EINVAL;
		goto remove_idr;
	}

	/* set the hw_sob to the handle after calling the sob wraparound handler
	 * since sob could have changed.
	 */
	handle->hw_sob = hw_sob;

	/* store the current sob value for unreserve validity check, and
	 * signal offset support
	 */
	handle->pre_sob_val = prop->next_sob_val - handle->count;

	*signals_count = prop->next_sob_val;
	hdev->asic_funcs->hw_queues_unlock(hdev);

	*sob_addr = handle->hw_sob->sob_addr;
	*handle_id = hdl_id;

	dev_dbg(hdev->dev,
		"Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
			hw_sob->sob_id, handle->hw_sob->sob_addr,
			prop->next_sob_val - 1, q_idx, hdl_id);

	goto out;

remove_idr:
	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, hdl_id);
	spin_unlock(&mgr->lock);

	kfree(handle);
out:
	return rc;
}
static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
{
	struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
	struct hl_sync_stream_properties *prop;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_encaps_signals_mgr *mgr;
	struct hl_hw_sob *hw_sob;
	u32 q_idx, sob_addr;
	int rc = 0;

	mgr = &hpriv->ctx->sig_mgr;

	spin_lock(&mgr->lock);
	encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
	if (encaps_sig_hdl) {
		dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
				handle_id, encaps_sig_hdl->hw_sob->sob_addr,
				encaps_sig_hdl->count);

		hdev->asic_funcs->hw_queues_lock(hdev);

		q_idx = encaps_sig_hdl->q_idx;
		prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
		hw_sob = &prop->hw_sob[prop->curr_sob_offset];
		sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);

		/* Check if sob_val got out of sync due to other
		 * signal submission requests which were handled
		 * between the reserve-unreserve calls or SOB switch
		 * upon reaching SOB max value.
		 */
		if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
				!= prop->next_sob_val ||
				sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
			dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
				encaps_sig_hdl->pre_sob_val,
				(prop->next_sob_val - encaps_sig_hdl->count));

			hdev->asic_funcs->hw_queues_unlock(hdev);
			rc = -EINVAL;
			goto out;
		}

		/*
		 * Decrement the SOB value by count by user request
		 * to unreserve those signals
		 */
		prop->next_sob_val -= encaps_sig_hdl->count;

		hdev->asic_funcs->hw_queues_unlock(hdev);

		hw_sob_put(hw_sob);

		/* Release the id and free allocated memory of the handle */
		idr_remove(&mgr->handles, handle_id);
		kfree(encaps_sig_hdl);
	} else {
		rc = -EINVAL;
		dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
	}
out:
	spin_unlock(&mgr->lock);

	return rc;
}
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
				void __user *chunks, u32 num_chunks,
				u64 *cs_seq, u32 flags, u32 timeout)
{
	struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
	bool handle_found = false, is_wait_cs = false,
			wait_cs_submitted = false,
			cs_encaps_signals = false;
	struct hl_cs_chunk *cs_chunk_array, *chunk;
	bool staged_cs_with_encaps_signals = false;
	struct hw_queue_properties *hw_queue_prop;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_compl *sig_waitcs_cmpl;
	u32 q_idx, collective_engine_id = 0;
	struct hl_cs_counters_atomic *cntr;
	struct hl_fence *sig_fence = NULL;
	struct hl_ctx *ctx = hpriv->ctx;
	enum hl_queue_type q_type;
	struct hl_cs *cs;
	u64 signal_seq;
	int rc;

	cntr = &hdev->aggregated_cs_counters;
	*cs_seq = ULLONG_MAX;

	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
			ctx);
	if (rc)
		goto out;

	/* currently it is guaranteed to have only one chunk */
	chunk = &cs_chunk_array[0];

	if (chunk->queue_index >= hdev->asic_prop.max_queues) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	q_idx = chunk->queue_index;
	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
	q_type = hw_queue_prop->type;

	if (!hw_queue_prop->supports_sync_stream) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev,
			"Queue index %d does not support sync stream operations\n",
			q_idx);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
		if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			dev_err(hdev->dev,
				"Queue index %d is invalid\n", q_idx);
			rc = -EINVAL;
			goto free_cs_chunk_array;
		}

		collective_engine_id = chunk->collective_engine_id;
	}

	is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
			cs_type == CS_TYPE_COLLECTIVE_WAIT);

	cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);

	if (is_wait_cs) {
		rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
				ctx, cs_encaps_signals);
		if (rc)
			goto free_cs_chunk_array;

		if (cs_encaps_signals) {
			/* check if cs sequence has encapsulated
			 * signals handle
			 */
			struct idr *idp;
			u32 id;

			spin_lock(&ctx->sig_mgr.lock);
			idp = &ctx->sig_mgr.handles;
			idr_for_each_entry(idp, encaps_sig_hdl, id) {
				if (encaps_sig_hdl->cs_seq == signal_seq) {
					handle_found = true;
					/* get refcount to protect removing
					 * this handle from idr, needed when
					 * multiple wait cs are used with offset
					 * to wait on reserved encaps signals.
					 */
					kref_get(&encaps_sig_hdl->refcount);
					break;
				}
			}
			spin_unlock(&ctx->sig_mgr.lock);

			if (!handle_found) {
				dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
						signal_seq);
				rc = -EINVAL;
				goto free_cs_chunk_array;
			}

			/* validate also the signal offset value */
			if (chunk->encaps_signal_offset >
					encaps_sig_hdl->count) {
				dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
						chunk->encaps_signal_offset,
						encaps_sig_hdl->count);
				rc = -EINVAL;
				goto free_cs_chunk_array;
			}
		}

		sig_fence = hl_ctx_get_fence(ctx, signal_seq);
		if (IS_ERR(sig_fence)) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			dev_err(hdev->dev,
				"Failed to get signal CS with seq 0x%llx\n",
				signal_seq);
			rc = PTR_ERR(sig_fence);
			goto free_cs_chunk_array;
		}

		if (!sig_fence) {
			/* signal CS already finished */
			rc = 0;
			goto free_cs_chunk_array;
		}

		sig_waitcs_cmpl =
			container_of(sig_fence, struct hl_cs_compl, base_fence);

		staged_cs_with_encaps_signals = !!
				(sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
				(flags & HL_CS_FLAGS_ENCAP_SIGNALS));

		if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
				!staged_cs_with_encaps_signals) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			dev_err(hdev->dev,
				"CS seq 0x%llx is not of a signal/encaps-signal CS\n",
				signal_seq);
			hl_fence_put(sig_fence);
			rc = -EINVAL;
			goto free_cs_chunk_array;
		}

		if (completion_done(&sig_fence->completion)) {
			/* signal CS already finished */
			hl_fence_put(sig_fence);
			rc = 0;
			goto free_cs_chunk_array;
		}
	}

	rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
	if (rc) {
		if (is_wait_cs)
			hl_fence_put(sig_fence);

		goto free_cs_chunk_array;
	}

	/*
	 * Save the signal CS fence for later initialization right before
	 * hanging the wait CS on the queue.
	 * for encaps signals case, we save the cs sequence and handle pointer
	 * for later initialization.
	 */
	if (is_wait_cs) {
		cs->signal_fence = sig_fence;
		/* store the handle pointer, so we don't have to
		 * look for it again, later on the flow
		 * when we need to set SOB info in hw_queue.
		 */
		if (cs->encaps_signals)
			cs->encaps_sig_hdl = encaps_sig_hdl;
	}

	hl_debugfs_add_cs(cs);

	*cs_seq = cs->sequence;

	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
		rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
				q_idx, chunk->encaps_signal_offset);
	else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
		rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
				cs, q_idx, collective_engine_id,
				chunk->encaps_signal_offset);
	else {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		rc = -EINVAL;
	}

	if (rc)
		goto free_cs_object;

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		/* In case wait cs failed here, it means the signal cs
		 * already completed. we want to free all its related objects
		 * but we don't want to fail the ioctl.
		 */
		if (is_wait_cs)
			rc = 0;
		else if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	if (is_wait_cs)
		wait_cs_submitted = true;
	goto put_cs;

free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
							is_wait_cs)
		kref_put(&encaps_sig_hdl->refcount,
				hl_encaps_handle_do_release);
	kfree(cs_chunk_array);
out:
	return rc;
}
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cs_args *args = data;
	enum hl_cs_type cs_type = 0;
	u64 cs_seq = ULONG_MAX;
	void __user *chunks;
	u32 num_chunks, flags, timeout,
		signals_count = 0, sob_addr = 0, handle_id = 0;
	int rc;

	rc = hl_cs_sanity_checks(hpriv, args);
	if (rc)
		goto out;

	rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
	if (rc)
		goto out;

	cs_type = hl_cs_get_cs_type(args->in.cs_flags &
					~HL_CS_FLAGS_FORCE_RESTORE);
	chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
	num_chunks = args->in.num_chunks_execute;
	flags = args->in.cs_flags;

	/* In case this is a staged CS, user should supply the CS sequence */
	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
		cs_seq = args->in.seq;

	timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
			? msecs_to_jiffies(args->in.timeout * 1000)
			: hpriv->hdev->timeout_jiffies;

	switch (cs_type) {
	case CS_TYPE_SIGNAL:
	case CS_TYPE_WAIT:
	case CS_TYPE_COLLECTIVE_WAIT:
		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
					&cs_seq, args->in.cs_flags, timeout);
		break;
	case CS_RESERVE_SIGNALS:
		rc = cs_ioctl_reserve_signals(hpriv,
					args->in.encaps_signals_q_idx,
					args->in.encaps_signals_count,
					&handle_id, &sob_addr, &signals_count);
		break;
	case CS_UNRESERVE_SIGNALS:
		rc = cs_ioctl_unreserve_signals(hpriv,
					args->in.encaps_sig_handle_id);
		break;
	default:
		rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
						args->in.cs_flags,
						args->in.encaps_sig_handle_id,
						timeout);
		break;
	}
out:
	if (rc != -EAGAIN) {
		memset(args, 0, sizeof(*args));

		if (cs_type == CS_RESERVE_SIGNALS) {
			args->out.handle_id = handle_id;
			args->out.sob_base_addr_offset = sob_addr;
			args->out.count = signals_count;
		} else {
			args->out.seq = cs_seq;
		}
		args->out.status = rc;
	}

	return rc;
}
static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
				enum hl_cs_wait_status *status, u64 timeout_us,
				s64 *timestamp)
{
	struct hl_device *hdev = ctx->hdev;
	long completion_rc;
	int rc = 0;

	if (IS_ERR(fence)) {
		rc = PTR_ERR(fence);
		if (rc == -EINVAL)
			dev_notice_ratelimited(hdev->dev,
				"Can't wait on CS %llu because current CS is at seq %llu\n",
				seq, ctx->cs_sequence);
		return rc;
	}

	if (!fence) {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);

		*status = CS_WAIT_STATUS_GONE;
		return 0;
	}

	if (!timeout_us) {
		completion_rc = completion_done(&fence->completion);
	} else {
		unsigned long timeout;

		timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
				timeout_us : usecs_to_jiffies(timeout_us);
		completion_rc =
			wait_for_completion_interruptible_timeout(
				&fence->completion, timeout);
	}

	if (completion_rc > 0) {
		*status = CS_WAIT_STATUS_COMPLETED;
		if (timestamp)
			*timestamp = ktime_to_ns(fence->timestamp);
	} else {
		*status = CS_WAIT_STATUS_BUSY;
	}

	if (fence->error == -ETIMEDOUT)
		rc = -ETIMEDOUT;
	else if (fence->error == -EIO)
		rc = -EIO;

	return rc;
}
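/* Added commentary: a timeout_us of 0 turns this into a non-blocking poll
 * (completion_done() only), which is how hl_cs_poll_fences() below uses it;
 * any other value sleeps interruptibly for up to the given timeout.
 */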
2300 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2302 * @mcs_data: multi-CS internal data
2304 * @return 0 on success, otherwise non 0 error code
2306 * The function iterates on all CS sequence in the list and set bit in
2307 * completion_bitmap for each completed CS.
2308 * while iterating, the function can extracts the stream map to be later
2309 * used by the waiting function.
2310 * this function shall be called after taking context ref
static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
{
	struct hl_fence **fence_ptr = mcs_data->fence_arr;
	struct hl_device *hdev = mcs_data->ctx->hdev;
	int i, rc, arr_len = mcs_data->arr_len;
	u64 *seq_arr = mcs_data->seq_arr;
	ktime_t max_ktime, first_cs_time;
	enum hl_cs_wait_status status;

	memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));

	/* get all fences under the same lock */
	rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
	if (rc)
		return rc;

	/*
	 * set to maximum time to verify timestamp is valid: if at the end
	 * this value is maintained - no timestamp was updated
	 */
	max_ktime = ktime_set(KTIME_SEC_MAX, 0);
	first_cs_time = max_ktime;

	for (i = 0; i < arr_len; i++, fence_ptr++) {
		struct hl_fence *fence = *fence_ptr;

		/*
		 * function won't sleep as it is called with timeout 0 (i.e.
		 * poll the fence)
		 */
		rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
						&status, 0, NULL);
		if (rc) {
			dev_err(hdev->dev,
				"wait_for_fence error: %d for CS seq %llu\n",
								rc, seq_arr[i]);
			break;
		}

		mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;

		if (status == CS_WAIT_STATUS_BUSY)
			continue;

		mcs_data->completion_bitmap |= BIT(i);

		/*
		 * best effort to extract timestamp. few notes:
		 * - if even a single fence is gone, we cannot extract the
		 *   timestamp (as the fence no longer exists)
		 * - for all completed CSs we take the earliest timestamp.
		 *   for this we have to validate that:
		 *       1. the given timestamp was indeed set
		 *       2. the timestamp is the earliest of all timestamps
		 *          so far
		 */
		if (status == CS_WAIT_STATUS_GONE) {
			mcs_data->update_ts = false;
			mcs_data->gone_cs = true;
		} else if (mcs_data->update_ts &&
			(ktime_compare(fence->timestamp,
						ktime_set(0, 0)) > 0) &&
			(ktime_compare(fence->timestamp, first_cs_time) < 0)) {
			first_cs_time = fence->timestamp;
		}
	}

	hl_fences_put(mcs_data->fence_arr, arr_len);

	if (mcs_data->update_ts &&
			(ktime_compare(first_cs_time, max_ktime) != 0))
		mcs_data->timestamp = ktime_to_ns(first_cs_time);

	return rc;
}
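
/*
 * Illustrative only (an assumption, not from the source): completion_bitmap
 * bit i corresponds to seq_arr[i], so a caller can map completions back to
 * the user-supplied sequence list:
 *
 *	for (i = 0; i < mcs_data.arr_len; i++)
 *		if (mcs_data.completion_bitmap & BIT(i))
 *			dev_dbg(hdev->dev, "CS %llu completed\n",
 *				mcs_data.seq_arr[i]);
 */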
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
				u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp)
{
	struct hl_fence *fence;
	int rc = 0;

	if (timestamp)
		*timestamp = 0;

	hl_ctx_get(hdev, ctx);
	fence = hl_ctx_get_fence(ctx, seq);

	rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
	hl_fence_put(fence);
	hl_ctx_put(ctx);

	return rc;
}
/*
 * hl_wait_multi_cs_completion_init - init completion structure
 *
 * @hdev: pointer to habanalabs device structure
 * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
 *                        master QID to wait on
 *
 * @return valid completion struct pointer on success, otherwise error pointer
 *
 * Up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver.
 * The function gets the first available completion (by marking it "used")
 * and initializes its values.
 */
static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
							struct hl_device *hdev,
							u8 stream_master_bitmap)
{
	struct multi_cs_completion *mcs_compl;
	int i;

	/* find free multi_cs completion structure */
	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		mcs_compl = &hdev->multi_cs_completion[i];
		spin_lock(&mcs_compl->lock);
		if (!mcs_compl->used) {
			mcs_compl->used = 1;
			mcs_compl->timestamp = 0;
			mcs_compl->stream_master_qid_map = stream_master_bitmap;
			reinit_completion(&mcs_compl->completion);
			spin_unlock(&mcs_compl->lock);
			break;
		}
		spin_unlock(&mcs_compl->lock);
	}

	if (i == MULTI_CS_MAX_USER_CTX) {
		dev_err(hdev->dev,
			"no available multi-CS completion structure\n");
		return ERR_PTR(-ENOMEM);
	}

	return mcs_compl;
}
/*
 * hl_wait_multi_cs_completion_fini - return completion structure and set as
 *                                    unused
 *
 * @mcs_compl: pointer to the completion structure
 */
static void hl_wait_multi_cs_completion_fini(
					struct multi_cs_completion *mcs_compl)
{
	/*
	 * free completion structure, do it under lock to be in-sync with the
	 * thread that signals completion
	 */
	spin_lock(&mcs_compl->lock);
	mcs_compl->used = 0;
	spin_unlock(&mcs_compl->lock);
}
/*
 * hl_wait_multi_cs_completion - wait for first CS to complete
 *
 * @mcs_data: multi-CS internal data
 *
 * @return 0 on success, otherwise non 0 error code
 */
static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
{
	struct hl_device *hdev = mcs_data->ctx->hdev;
	struct multi_cs_completion *mcs_compl;
	long completion_rc;

	mcs_compl = hl_wait_multi_cs_completion_init(hdev,
					mcs_data->stream_master_qid_map);
	if (IS_ERR(mcs_compl))
		return PTR_ERR(mcs_compl);

	completion_rc = wait_for_completion_interruptible_timeout(
					&mcs_compl->completion,
					usecs_to_jiffies(mcs_data->timeout_us));

	/* update timestamp */
	if (completion_rc > 0)
		mcs_data->timestamp = mcs_compl->timestamp;

	hl_wait_multi_cs_completion_fini(mcs_compl);

	mcs_data->wait_status = completion_rc;

	return 0;
}
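
/*
 * Note (descriptive, added for clarity): the stored wait_status follows the
 * return convention of wait_for_completion_interruptible_timeout() - a
 * positive value means a completion arrived (remaining jiffies), 0 means the
 * timeout expired, and -ERESTARTSYS means the wait was interrupted by a
 * signal. The multi-CS ioctl below translates these into HL_WAIT_CS_STATUS_*
 * values for user space.
 */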
/*
 * hl_multi_cs_completion_init - init array of multi-CS completion structures
 *
 * @hdev: pointer to habanalabs device structure
 */
void hl_multi_cs_completion_init(struct hl_device *hdev)
{
	struct multi_cs_completion *mcs_cmpl;
	int i;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		mcs_cmpl = &hdev->multi_cs_completion[i];
		mcs_cmpl->used = 0;
		spin_lock_init(&mcs_cmpl->lock);
		init_completion(&mcs_cmpl->completion);
	}
}
/*
 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
 *
 * @hpriv: pointer to the private data of the fd
 * @data: pointer to multi-CS wait ioctl in/out args
 */
static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	struct multi_cs_data mcs_data = {0};
	union hl_wait_cs_args *args = data;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_fence **fence_arr;
	void __user *seq_arr;
	u32 size_to_copy;
	u64 *cs_seq_arr;
	u8 seq_arr_len;
	int rc;

	if (!hdev->supports_wait_for_multi_cs) {
		dev_err(hdev->dev, "Wait for multi CS is not supported\n");
		return -EPERM;
	}

	seq_arr_len = args->in.seq_arr_len;

	if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
		dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
				HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
		return -EINVAL;
	}

	/* allocate memory for sequence array */
	cs_seq_arr =
		kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
	if (!cs_seq_arr)
		return -ENOMEM;

	/* copy CS sequence array from user */
	seq_arr = (void __user *) (uintptr_t) args->in.seq;
	size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
	if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
		dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
		rc = -EFAULT;
		goto free_seq_arr;
	}

	/* allocate array for the fences */
	fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
	if (!fence_arr) {
		rc = -ENOMEM;
		goto free_seq_arr;
	}

	/* initialize the multi-CS internal data */
	mcs_data.ctx = ctx;
	mcs_data.seq_arr = cs_seq_arr;
	mcs_data.fence_arr = fence_arr;
	mcs_data.arr_len = seq_arr_len;

	hl_ctx_get(hdev, ctx);

	/* poll all CS fences, extract timestamp */
	mcs_data.update_ts = true;
	rc = hl_cs_poll_fences(&mcs_data);
	/*
	 * skip wait for CS completion when one of the below is true:
	 * - an error on the poll function
	 * - one or more CS in the list completed
	 * - the user called ioctl with timeout 0
	 */
	if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
		goto put_ctx;

	/* wait (with timeout) for the first CS to be completed */
	mcs_data.timeout_us = args->in.timeout_us;
	rc = hl_wait_multi_cs_completion(&mcs_data);
	if (rc)
		goto put_ctx;

	if (mcs_data.wait_status > 0) {
		/*
		 * poll fences once again to update the CS map.
		 * no timestamp should be updated this time.
		 */
		mcs_data.update_ts = false;
		rc = hl_cs_poll_fences(&mcs_data);

		/*
		 * if hl_wait_multi_cs_completion returned before timeout (i.e.
		 * it got a completion) we expect to see at least one CS
		 * completed after the poll function.
		 */
		if (!mcs_data.completion_bitmap) {
			dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
			rc = -EFAULT;
		}
	}

put_ctx:
	hl_ctx_put(ctx);
	kfree(fence_arr);

free_seq_arr:
	kfree(cs_seq_arr);

	/* update output args */
	memset(args, 0, sizeof(*args));
	if (rc)
		return rc;

	if (mcs_data.completion_bitmap) {
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
		args->out.cs_completion_map = mcs_data.completion_bitmap;

		/* if timestamp is not 0 - it's valid */
		if (mcs_data.timestamp) {
			args->out.timestamp_nsec = mcs_data.timestamp;
			args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
		}

		/* update if some CS was gone */
		if (mcs_data.gone_cs)
			args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
	} else if (mcs_data.wait_status == -ERESTARTSYS) {
		args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
	} else {
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
	}

	return 0;
}
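
/*
 * Illustrative user-space sketch (an assumption, not from the source) of
 * driving the multi-CS wait; field names are from uapi/misc/habanalabs.h,
 * while fd, seqs[] and n_seqs are assumed to exist:
 *
 *	union hl_wait_cs_args wait_args = {0};
 *
 *	wait_args.in.seq = (__u64) (uintptr_t) seqs;
 *	wait_args.in.seq_arr_len = n_seqs;
 *	wait_args.in.timeout_us = 1000000;
 *	wait_args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
 *	if (!ioctl(fd, HL_IOCTL_WAIT_CS, &wait_args))
 *		completed = wait_args.out.cs_completion_map;
 */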
static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_wait_cs_args *args = data;
	enum hl_cs_wait_status status;
	u64 seq = args->in.seq;
	s64 timestamp;
	int rc;

	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
				&status, &timestamp);

	memset(args, 0, sizeof(*args));

	if (rc) {
		if (rc == -ERESTARTSYS) {
			dev_err_ratelimited(hdev->dev,
				"user process got signal while waiting for CS handle %llu\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
			rc = -EINTR;
		} else if (rc == -ETIMEDOUT) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has timed-out while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
		} else if (rc == -EIO) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has been aborted while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
		}
		return rc;
	}

	if (timestamp) {
		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
		args->out.timestamp_nsec = timestamp;
	}

	switch (status) {
	case CS_WAIT_STATUS_GONE:
		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
		fallthrough;
	case CS_WAIT_STATUS_COMPLETED:
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
		break;
	case CS_WAIT_STATUS_BUSY:
	default:
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
		break;
	}

	return 0;
}
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
				u32 timeout_us, u64 user_address,
				u32 target_value, u16 interrupt_offset,
				enum hl_cs_wait_status *status)
{
	struct hl_user_pending_interrupt *pend;
	struct hl_user_interrupt *interrupt;
	unsigned long timeout, flags;
	u32 completion_value;
	long completion_rc;
	int rc = 0;

	if (timeout_us == U32_MAX)
		timeout = timeout_us;
	else
		timeout = usecs_to_jiffies(timeout_us);

	hl_ctx_get(hdev, ctx);

	pend = kmalloc(sizeof(*pend), GFP_KERNEL);
	if (!pend) {
		hl_ctx_put(ctx);
		return -ENOMEM;
	}

	hl_fence_init(&pend->fence, ULONG_MAX);

	if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
		interrupt = &hdev->common_user_interrupt;
	else
		interrupt = &hdev->user_interrupt[interrupt_offset];

	if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
		dev_err(hdev->dev, "Failed to copy completion value from user\n");
		rc = -EFAULT;
		goto free_fence;
	}

	if (completion_value >= target_value)
		*status = CS_WAIT_STATUS_COMPLETED;
	else
		*status = CS_WAIT_STATUS_BUSY;

	if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
		goto free_fence;

	/* Add pending user interrupt to relevant list for the interrupt
	 * handler to monitor
	 */
	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
	list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);

wait_again:
	/* Wait for interrupt handler to signal completion */
	completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
										timeout);

	/* If timeout did not expire we need to perform the comparison.
	 * If comparison fails, keep waiting until timeout expires
	 */
	if (completion_rc > 0) {
		if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
			dev_err(hdev->dev, "Failed to copy completion value from user\n");
			rc = -EFAULT;

			goto remove_pending_user_interrupt;
		}

		if (completion_value >= target_value) {
			*status = CS_WAIT_STATUS_COMPLETED;
		} else {
			spin_lock_irqsave(&interrupt->wait_list_lock, flags);
			reinit_completion(&pend->fence.completion);
			timeout = completion_rc;
			spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);

			goto wait_again;
		}
	} else if (completion_rc == -ERESTARTSYS) {
		dev_err_ratelimited(hdev->dev,
			"user process got signal while waiting for interrupt ID %d\n",
			interrupt->interrupt_id);
		*status = HL_WAIT_CS_STATUS_INTERRUPTED;
		rc = -EINTR;
	} else {
		*status = CS_WAIT_STATUS_BUSY;
	}

remove_pending_user_interrupt:
	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
	list_del(&pend->wait_list_node);
	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);

free_fence:
	kfree(pend);
	hl_ctx_put(ctx);

	return rc;
}
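
/*
 * Illustrative user-space sketch (an assumption, not from the source) of the
 * wait-on-interrupt contract implemented above: the device updates a 32-bit
 * counter in user memory and the ioctl returns once *addr >= target or the
 * timeout expires. Field names are from uapi/misc/habanalabs.h; fd, cnt,
 * interrupt_id and the shift of HL_WAIT_CS_FLAGS_INTERRUPT_MASK are assumed:
 *
 *	union hl_wait_cs_args wait_args = {0};
 *
 *	wait_args.in.addr = (__u64) (uintptr_t) &cnt;
 *	wait_args.in.target = 1;
 *	wait_args.in.interrupt_timeout_us = 1000000;
 *	wait_args.in.flags = HL_WAIT_CS_FLAGS_INTERRUPT |
 *			(interrupt_id << 20);	[ID field per _INTERRUPT_MASK]
 *	rc = ioctl(fd, HL_IOCTL_WAIT_CS, &wait_args);
 */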
static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop;
	union hl_wait_cs_args *args = data;
	enum hl_cs_wait_status status;
	int rc;

	prop = &hdev->asic_prop;

	if (!prop->user_interrupt_count) {
		dev_err(hdev->dev, "no user interrupts allowed");
		return -EPERM;
	}

	interrupt_id =
		FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);

	first_interrupt = prop->first_available_user_msix_interrupt;
	last_interrupt = prop->first_available_user_msix_interrupt +
						prop->user_interrupt_count - 1;

	if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
			interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
		dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
		return -EINVAL;
	}

	if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
		interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
	else
		interrupt_offset = interrupt_id - first_interrupt;

	rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
			args->in.interrupt_timeout_us, args->in.addr,
			args->in.target, interrupt_offset, &status);

	memset(args, 0, sizeof(*args));

	if (rc) {
		dev_err_ratelimited(hdev->dev,
			"interrupt_wait_ioctl failed (%d)\n", rc);

		return rc;
	}

	switch (status) {
	case CS_WAIT_STATUS_COMPLETED:
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
		break;
	case CS_WAIT_STATUS_BUSY:
	default:
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
		break;
	}

	return 0;
}
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_wait_cs_args *args = data;
	u32 flags = args->in.flags;
	int rc;

	/* If the device is not operational, no point in waiting for any command submission or
	 * user interrupt
	 */
	if (!hl_device_operational(hpriv->hdev, NULL))
		return -EPERM;

	if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
		rc = hl_interrupt_wait_ioctl(hpriv, data);
	else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
		rc = hl_multi_cs_wait_ioctl(hpriv, data);
	else
		rc = hl_cs_wait_ioctl(hpriv, data);

	return rc;
}
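
/*
 * Dispatch summary (descriptive note, added for clarity): HL_IOCTL_WAIT_CS
 * multiplexes three wait flavors through args->in.flags:
 *
 *	HL_WAIT_CS_FLAGS_INTERRUPT -> hl_interrupt_wait_ioctl()
 *	HL_WAIT_CS_FLAGS_MULTI_CS  -> hl_multi_cs_wait_ioctl()
 *	(no flag)                  -> hl_cs_wait_ioctl() on a single seq
 */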