1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2021 HabanaLabs, Ltd.
8 #include <uapi/drm/habanalabs_accel.h>
9 #include "habanalabs.h"
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
14 #define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15 HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
16 HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
17 HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
20 #define MAX_TS_ITER_NUM 100
23 * enum hl_cs_wait_status - cs wait status
24 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
25 * @CS_WAIT_STATUS_COMPLETED: cs completed
26 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
28 enum hl_cs_wait_status {
30 CS_WAIT_STATUS_COMPLETED,
34 static void job_wq_completion(struct work_struct *work);
35 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
36 enum hl_cs_wait_status *status, s64 *timestamp);
37 static void cs_do_release(struct kref *ref);
39 static void hl_push_cs_outcome(struct hl_device *hdev,
40 struct hl_cs_outcome_store *outcome_store,
41 u64 seq, ktime_t ts, int error)
43 struct hl_cs_outcome *node;
47 * CS outcome store supports the following operations:
48 * push outcome - store a recent CS outcome in the store
49 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
50 * It uses 2 lists: used list and free list.
51 * It has a pre-allocated amount of nodes, each node stores
52 * a single CS outcome.
53 * Initially, all the nodes are in the free list.
54 * On push outcome, a node (any) is taken from the free list, its
55 * information is filled in, and the node is moved to the used list.
56 * It is possible, that there are no nodes left in the free list.
57 * In this case, we will lose some information about old outcomes. We
58 * will pop the OLDEST node from the used list, and make it free.
59 * On pop, the node is searched for in the used list (using a search
61 * If found, the node is then removed from the used list, and moved
62 * back to the free list. The outcome data that the node contained is
63 * returned back to the user.
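 *
 * Illustrative example: with a store of 2 pre-allocated nodes, pushing
 * outcomes for seqs 7, 8 and then 9 recycles the oldest used node, so the
 * outcome of seq 7 is lost (reported via dev_dbg) while 8 and 9 remain
 * available for pop.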
66 spin_lock_irqsave(&outcome_store->db_lock, flags);
68 if (list_empty(&outcome_store->free_list)) {
69 node = list_last_entry(&outcome_store->used_list,
70 struct hl_cs_outcome, list_link);
71 hash_del(&node->map_link);
72 dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
74 node = list_last_entry(&outcome_store->free_list,
75 struct hl_cs_outcome, list_link);
78 list_del_init(&node->list_link);
84 list_add(&node->list_link, &outcome_store->used_list);
85 hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
87 spin_unlock_irqrestore(&outcome_store->db_lock, flags);
90 static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
91 u64 seq, ktime_t *ts, int *error)
93 struct hl_cs_outcome *node;
96 spin_lock_irqsave(&outcome_store->db_lock, flags);
98 hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
99 if (node->seq == seq) {
101 *error = node->error;
103 hash_del(&node->map_link);
104 list_del_init(&node->list_link);
105 list_add(&node->list_link, &outcome_store->free_list);
107 spin_unlock_irqrestore(&outcome_store->db_lock, flags);
112 spin_unlock_irqrestore(&outcome_store->db_lock, flags);
117 static void hl_sob_reset(struct kref *ref)
119 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
121 struct hl_device *hdev = hw_sob->hdev;
123 dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
125 hdev->asic_funcs->reset_sob(hdev, hw_sob);
127 hw_sob->need_reset = false;
130 void hl_sob_reset_error(struct kref *ref)
132 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
134 struct hl_device *hdev = hw_sob->hdev;
137 "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
138 hw_sob->q_idx, hw_sob->sob_id);
141 void hw_sob_put(struct hl_hw_sob *hw_sob)
144 kref_put(&hw_sob->kref, hl_sob_reset);
147 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
150 kref_put(&hw_sob->kref, hl_sob_reset_error);
153 void hw_sob_get(struct hl_hw_sob *hw_sob)
156 kref_get(&hw_sob->kref);
160 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
161 * @sob_base: sob base id
162 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
163 * @mask: generated mask
165 * Return: 0 if given parameters are valid
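 *
 * Illustrative example: for sob_base = 10 and sob_mask = 0x1 the generated
 * mask is ~BIT(10 & 0x7) = ~BIT(2). For a multi-bit sob_mask, the MSB of the
 * mask plus the (sob_base & 0x7) offset must stay below
 * HL_MAX_SOBS_PER_MONITOR, otherwise the parameters are rejected.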
167 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
174 if (sob_mask == 0x1) {
175 *mask = ~(1 << (sob_base & 0x7));
177 /* find msb in order to verify sob range is valid */
178 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
179 if (BIT(i) & sob_mask)
182 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
191 static void hl_fence_release(struct kref *kref)
193 struct hl_fence *fence =
194 container_of(kref, struct hl_fence, refcount);
195 struct hl_cs_compl *hl_cs_cmpl =
196 container_of(fence, struct hl_cs_compl, base_fence);
201 void hl_fence_put(struct hl_fence *fence)
203 if (IS_ERR_OR_NULL(fence))
205 kref_put(&fence->refcount, hl_fence_release);
208 void hl_fences_put(struct hl_fence **fence, int len)
212 for (i = 0; i < len; i++, fence++)
213 hl_fence_put(*fence);
216 void hl_fence_get(struct hl_fence *fence)
219 kref_get(&fence->refcount);
222 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
224 kref_init(&fence->refcount);
225 fence->cs_sequence = sequence;
227 fence->timestamp = ktime_set(0, 0);
228 fence->mcs_handling_done = false;
229 init_completion(&fence->completion);
232 void cs_get(struct hl_cs *cs)
234 kref_get(&cs->refcount);
237 static int cs_get_unless_zero(struct hl_cs *cs)
239 return kref_get_unless_zero(&cs->refcount);
242 static void cs_put(struct hl_cs *cs)
244 kref_put(&cs->refcount, cs_do_release);
247 static void cs_job_do_release(struct kref *ref)
249 struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
254 static void hl_cs_job_put(struct hl_cs_job *job)
256 kref_put(&job->refcount, cs_job_do_release);
259 bool cs_needs_completion(struct hl_cs *cs)
261 /* In case this is a staged CS, only the last CS in sequence should
262 * get a completion, any non staged CS will always get a completion
264 if (cs->staged_cs && !cs->staged_last)
270 bool cs_needs_timeout(struct hl_cs *cs)
272 /* In case this is a staged CS, only the first CS in sequence should
273 * get a timeout, any non staged CS will always get a timeout
275 if (cs->staged_cs && !cs->staged_first)
281 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
284 * Patched CB is created for external queue jobs, and for H/W queue
285 * jobs if the user CB was allocated by the driver and the MMU is disabled.
287 return (job->queue_type == QUEUE_TYPE_EXT ||
288 (job->queue_type == QUEUE_TYPE_HW &&
289 job->is_kernel_allocated_cb &&
294 * cs_parser - parse the user command submission
296 * @hpriv: pointer to the private data of the fd
297 * @job: pointer to the job that holds the command submission info
299 * The function parses the command submission of the user. It calls the
300 * ASIC specific parser, which returns a list of memory blocks to send
301 * to the device as different command buffers
304 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
306 struct hl_device *hdev = hpriv->hdev;
307 struct hl_cs_parser parser;
310 parser.ctx_id = job->cs->ctx->asid;
311 parser.cs_sequence = job->cs->sequence;
312 parser.job_id = job->id;
314 parser.hw_queue_id = job->hw_queue_id;
315 parser.job_userptr_list = &job->userptr_list;
316 parser.patched_cb = NULL;
317 parser.user_cb = job->user_cb;
318 parser.user_cb_size = job->user_cb_size;
319 parser.queue_type = job->queue_type;
320 parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
321 job->patched_cb = NULL;
322 parser.completion = cs_needs_completion(job->cs);
324 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
326 if (is_cb_patched(hdev, job)) {
328 job->patched_cb = parser.patched_cb;
329 job->job_cb_size = parser.patched_cb_size;
330 job->contains_dma_pkt = parser.contains_dma_pkt;
331 atomic_inc(&job->patched_cb->cs_cnt);
335 * Whether the parsing worked or not, we don't need the
336 * original CB anymore because it was already parsed and
337 * won't be accessed again for this CS
339 atomic_dec(&job->user_cb->cs_cnt);
340 hl_cb_put(job->user_cb);
343 job->job_cb_size = job->user_cb_size;
349 static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
351 struct hl_cs *cs = job->cs;
353 if (is_cb_patched(hdev, job)) {
354 hl_userptr_delete_list(hdev, &job->userptr_list);
357 * We might arrive here from rollback and patched CB wasn't
358 * created, so we need to check it's not NULL
360 if (job->patched_cb) {
361 atomic_dec(&job->patched_cb->cs_cnt);
362 hl_cb_put(job->patched_cb);
366 /* For H/W queue jobs, if a user CB was allocated by driver and MMU is
367 * enabled, the user CB isn't released in cs_parser() and thus should be
368 * released here. This is also true for INT queues jobs which were
369 * allocated by driver.
371 if ((job->is_kernel_allocated_cb &&
372 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
373 job->queue_type == QUEUE_TYPE_INT))) {
374 atomic_dec(&job->user_cb->cs_cnt);
375 hl_cb_put(job->user_cb);
379 * This is the only place where there can be multiple threads
380 * modifying the list at the same time
382 spin_lock(&cs->job_lock);
383 list_del(&job->cs_node);
384 spin_unlock(&cs->job_lock);
386 hl_debugfs_remove_job(hdev, job);
388 /* We decrement reference only for a CS that gets completion
389 * because the reference was incremented only for this kind of CS
390 * right before it was scheduled.
392 * In staged submission, only the last CS marked as 'staged_last'
393 * gets completion, hence its release function will be called from here.
394 * As for all the other CS's in the staged submission which do not get a
395 * completion, their CS reference will be decremented by the
396 * 'staged_last' CS during the CS release flow.
397 * All relevant PQ CI counters will be incremented during the CS release
398 * flow by calling 'hl_hw_queue_update_ci'.
400 if (cs_needs_completion(cs) &&
401 (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
403 /* In CS based completions, the timestamp is already available,
404 * so no need to extract it from job
406 if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
407 cs->completion_timestamp = job->timestamp;
416 * hl_staged_cs_find_first - locate the first CS in this staged submission
418 * @hdev: pointer to device structure
419 * @cs_seq: staged submission sequence number
421 * @note: This function must be called under 'hdev->cs_mirror_lock'
423 * Find and return a CS pointer with the given sequence
425 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
429 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
430 if (cs->staged_cs && cs->staged_first &&
431 cs->sequence == cs_seq)
438 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
440 * @hdev: pointer to device structure
441 * @cs: staged submission member
444 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
446 struct hl_cs *last_entry;
448 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
451 if (last_entry->staged_last)
458 * staged_cs_get - get CS reference if this CS is a part of a staged CS
460 * @hdev: pointer to device structure
462 * @cs_seq: staged submission sequence number
464 * Increment CS reference for every CS in this staged submission except for
465 * the CS which gets the completion.
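 *
 * Illustrative example: in a staged submission A -> B -> C where C is marked
 * 'staged_last', a reference is taken on A and B only; C gets the completion
 * and its release flow puts the references of the whole group.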
467 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
469 /* Only the last CS in this staged submission will get a completion.
470 * We must increment the reference for all other CS's in this
472 * Once we get a completion we will release the whole staged submission.
474 if (!cs->staged_last)
479 * staged_cs_put - put a CS in case it is part of staged submission
481 * @hdev: pointer to device structure
484 * This function decrements a CS reference (for a non-completion CS)
486 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
488 /* We release all CS's in a staged submission except the last
489 * CS, whose reference we never incremented.
491 if (!cs_needs_completion(cs))
495 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
497 struct hl_cs *next = NULL, *iter, *first_cs;
499 if (!cs_needs_timeout(cs))
502 spin_lock(&hdev->cs_mirror_lock);
504 /* We need to handle TDR only once for the complete staged submission.
505 * Hence, we choose the CS that reaches this function first, which is
506 * the CS marked as 'staged_last'.
507 * In case a single staged cs was submitted which has both first and last
508 * indications, then "cs_find_first" below will return NULL, since we
509 * removed the cs node from the list before getting here.
510 * In such cases just continue with the cs to cancel its TDR work.
512 if (cs->staged_cs && cs->staged_last) {
513 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
518 spin_unlock(&hdev->cs_mirror_lock);
520 /* Don't cancel TDR in case this CS timed out because we might be
521 * running from the TDR context
523 if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
527 cancel_delayed_work_sync(&cs->work_tdr);
529 spin_lock(&hdev->cs_mirror_lock);
531 /* queue TDR for next CS */
532 list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
533 if (cs_needs_timeout(iter)) {
538 if (next && !next->tdr_active) {
539 next->tdr_active = true;
540 schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
543 spin_unlock(&hdev->cs_mirror_lock);
547 * force_complete_multi_cs - complete all contexts that wait on multi-CS
549 * @hdev: pointer to habanalabs device structure
551 static void force_complete_multi_cs(struct hl_device *hdev)
555 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
556 struct multi_cs_completion *mcs_compl;
558 mcs_compl = &hdev->multi_cs_completion[i];
560 spin_lock(&mcs_compl->lock);
562 if (!mcs_compl->used) {
563 spin_unlock(&mcs_compl->lock);
567 /* when calling force complete no context should be waiting on
569 * We are calling the function as a protection for such a case,
570 * to free any pending context and print an error message
573 "multi-CS completion context %d still waiting when calling force completion\n",
575 complete_all(&mcs_compl->completion);
576 spin_unlock(&mcs_compl->lock);
581 * complete_multi_cs - complete all waiting entities on multi-CS
583 * @hdev: pointer to habanalabs device structure
585 * The function signals a waiting entity that has overlapping stream masters
586 * with the completed CS.
588 * - a completed CS worked on stream master QID 4, multi CS completion
589 * is actively waiting on stream master QIDs 3, 5. don't send signal as no
590 * common stream master QID
591 * - a completed CS worked on stream master QID 4, multi CS completion
592 * is actively waiting on stream master QIDs 3, 4. send signal as stream
593 * master QID 4 is common
595 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
597 struct hl_fence *fence = cs->fence;
600 /* in case of multi CS check for completion only for the first CS */
601 if (cs->staged_cs && !cs->staged_first)
604 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
605 struct multi_cs_completion *mcs_compl;
607 mcs_compl = &hdev->multi_cs_completion[i];
608 if (!mcs_compl->used)
611 spin_lock(&mcs_compl->lock);
615 * 1. still waiting for completion
616 * 2. the completed CS has at least one overlapping stream
617 * master with the stream masters in the completion
619 if (mcs_compl->used &&
620 (fence->stream_master_qid_map &
621 mcs_compl->stream_master_qid_map)) {
622 /* extract the timestamp only of first completed CS */
623 if (!mcs_compl->timestamp)
624 mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
626 complete_all(&mcs_compl->completion);
629 * Setting mcs_handling_done inside the lock ensures
630 * at least one fence has mcs_handling_done set to
631 * true before the wait for mcs finishes. This ensures at
632 * least one CS will be set as completed when polling
635 fence->mcs_handling_done = true;
638 spin_unlock(&mcs_compl->lock);
640 /* In case CS completed without mcs completion initialized */
641 fence->mcs_handling_done = true;
644 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
646 struct hl_cs_compl *hl_cs_cmpl)
648 /* Skip this handler if the cs wasn't submitted, to avoid putting
649 * the hw_sob twice, since this case is already handled at this point;
650 * also skip if the hw_sob pointer wasn't set.
652 if (!hl_cs_cmpl->hw_sob || !cs->submitted)
655 spin_lock(&hl_cs_cmpl->lock);
658 * we get refcount upon reservation of signals or signal/wait cs for the
659 * hw_sob object, and need to put it when the first staged cs
660 * (which contains the encaps signals) or cs signal/wait is completed.
662 if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
663 (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
664 (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
665 (!!hl_cs_cmpl->encaps_signals)) {
667 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
670 hl_cs_cmpl->hw_sob->sob_id,
671 hl_cs_cmpl->sob_val);
673 hw_sob_put(hl_cs_cmpl->hw_sob);
675 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
676 hdev->asic_funcs->reset_sob_group(hdev,
677 hl_cs_cmpl->sob_group);
680 spin_unlock(&hl_cs_cmpl->lock);
683 static void cs_do_release(struct kref *ref)
685 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
686 struct hl_device *hdev = cs->ctx->hdev;
687 struct hl_cs_job *job, *tmp;
688 struct hl_cs_compl *hl_cs_cmpl =
689 container_of(cs->fence, struct hl_cs_compl, base_fence);
691 cs->completed = true;
694 * Although reaching here means that all external jobs have
695 * finished (because each one of them took a refcnt on the CS), we still
696 * need to go over the internal jobs and complete them. Otherwise, we
697 * will have leaked memory and what's worse, the CS object (and
698 * potentially the CTX object) could be released, while the JOB
699 * still holds a pointer to them (but no reference).
701 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
702 hl_complete_job(hdev, job);
704 if (!cs->submitted) {
706 * In case the wait for signal CS was submitted, the fence put
707 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
708 * right before hanging on the PQ.
710 if (cs->type == CS_TYPE_WAIT ||
711 cs->type == CS_TYPE_COLLECTIVE_WAIT)
712 hl_fence_put(cs->signal_fence);
717 /* Need to update CI for all queue jobs that do not get a completion */
718 hl_hw_queue_update_ci(cs);
720 /* remove CS from CS mirror list */
721 spin_lock(&hdev->cs_mirror_lock);
722 list_del_init(&cs->mirror_node);
723 spin_unlock(&hdev->cs_mirror_lock);
725 cs_handle_tdr(hdev, cs);
728 /* the completion CS decrements reference for the entire
731 if (cs->staged_last) {
732 struct hl_cs *staged_cs, *tmp_cs;
734 list_for_each_entry_safe(staged_cs, tmp_cs,
735 &cs->staged_cs_node, staged_cs_node)
736 staged_cs_put(hdev, staged_cs);
739 /* A staged CS will be a member in the list only after it
740 * was submitted. We used 'cs_mirror_lock' when inserting
741 * it into the list, so we will use it again when removing it
744 spin_lock(&hdev->cs_mirror_lock);
745 list_del(&cs->staged_cs_node);
746 spin_unlock(&hdev->cs_mirror_lock);
749 /* decrement refcount to handle when first staged cs
750 * with encaps signals is completed.
752 if (hl_cs_cmpl->encaps_signals)
753 kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
754 hl_encaps_release_handle_and_put_ctx);
757 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
758 kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
761 /* Must be called before hl_ctx_put because inside we use ctx to get
764 hl_debugfs_remove_cs(cs);
766 hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
768 /* We need to mark an error for not submitted because in that case
769 * the hl fence release flow is different. Mainly, we don't need
770 * to handle hw_sob for signal/wait
773 cs->fence->error = -ETIMEDOUT;
774 else if (cs->aborted)
775 cs->fence->error = -EIO;
776 else if (!cs->submitted)
777 cs->fence->error = -EBUSY;
779 if (unlikely(cs->skip_reset_on_timeout)) {
781 "Command submission %llu completed after %llu (s)\n",
783 div_u64(jiffies - cs->submission_time_jiffies, HZ));
787 cs->fence->timestamp = cs->completion_timestamp;
788 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
789 cs->fence->timestamp, cs->fence->error);
794 complete_all(&cs->fence->completion);
795 complete_multi_cs(hdev, cs);
797 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
799 hl_fence_put(cs->fence);
801 kfree(cs->jobs_in_queue_cnt);
805 static void cs_timedout(struct work_struct *work)
807 struct hl_device *hdev;
808 u64 event_mask = 0x0;
810 struct hl_cs *cs = container_of(work, struct hl_cs,
812 bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false;
814 rc = cs_get_unless_zero(cs);
818 if ((!cs->submitted) || (cs->completed)) {
823 hdev = cs->ctx->hdev;
825 if (likely(!skip_reset_on_timeout)) {
826 if (hdev->reset_on_lockup)
829 hdev->reset_info.needs_reset = true;
831 /* Mark the CS as timed out so we won't try to cancel its TDR */
835 /* Save only the first CS timeout parameters */
836 rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
838 hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
839 hdev->captured_err_info.cs_timeout.seq = cs->sequence;
840 event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
846 "Signal command submission %llu has not finished in time!\n",
852 "Wait command submission %llu has not finished in time!\n",
856 case CS_TYPE_COLLECTIVE_WAIT:
858 "Collective Wait command submission %llu has not finished in time!\n",
864 "Command submission %llu has not finished in time!\n",
869 rc = hl_state_dump(hdev);
871 dev_err(hdev->dev, "Error during system state dump %d\n", rc);
876 event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
877 hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
878 } else if (event_mask) {
879 hl_notifier_event_send_all(hdev, event_mask);
883 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
884 enum hl_cs_type cs_type, u64 user_sequence,
885 struct hl_cs **cs_new, u32 flags, u32 timeout)
887 struct hl_cs_counters_atomic *cntr;
888 struct hl_fence *other = NULL;
889 struct hl_cs_compl *cs_cmpl;
893 cntr = &hdev->aggregated_cs_counters;
895 cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
897 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
900 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
901 atomic64_inc(&cntr->out_of_mem_drop_cnt);
905 /* increment refcnt for context */
909 cs->submitted = false;
910 cs->completed = false;
912 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
913 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
914 cs->timeout_jiffies = timeout;
915 cs->skip_reset_on_timeout =
916 hdev->reset_info.skip_reset_on_timeout ||
917 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
918 cs->submission_time_jiffies = jiffies;
919 INIT_LIST_HEAD(&cs->job_list);
920 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
921 kref_init(&cs->refcount);
922 spin_lock_init(&cs->job_lock);
924 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
926 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
929 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
930 atomic64_inc(&cntr->out_of_mem_drop_cnt);
935 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
936 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
937 if (!cs->jobs_in_queue_cnt)
938 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
939 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
941 if (!cs->jobs_in_queue_cnt) {
942 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
943 atomic64_inc(&cntr->out_of_mem_drop_cnt);
948 cs_cmpl->hdev = hdev;
949 cs_cmpl->type = cs->type;
950 spin_lock_init(&cs_cmpl->lock);
951 cs->fence = &cs_cmpl->base_fence;
953 spin_lock(&ctx->cs_lock);
955 cs_cmpl->cs_seq = ctx->cs_sequence;
956 other = ctx->cs_pending[cs_cmpl->cs_seq &
957 (hdev->asic_prop.max_pending_cs - 1)];
959 if (other && !completion_done(&other->completion)) {
960 /* If the following statement is true, it means we have reached
961 * a point in which only part of the staged submission was
962 * submitted and we don't have enough room in the 'cs_pending'
963 * array for the rest of the submission.
964 * This causes a deadlock because this CS will never be
965 * completed as it depends on future CS's for completion.
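 * Illustrative example: the slot this CS would reuse still holds the
 * uncompleted fence of this staged submission's own first CS, so the
 * submission would effectively be waiting on itself.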
967 if (other->cs_sequence == user_sequence)
968 dev_crit_ratelimited(hdev->dev,
969 "Staged CS %llu deadlock due to lack of resources",
972 dev_dbg_ratelimited(hdev->dev,
973 "Rejecting CS because of too many in-flights CS\n");
974 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
975 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
981 hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
983 cs->sequence = cs_cmpl->cs_seq;
985 ctx->cs_pending[cs_cmpl->cs_seq &
986 (hdev->asic_prop.max_pending_cs - 1)] =
987 &cs_cmpl->base_fence;
990 hl_fence_get(&cs_cmpl->base_fence);
994 spin_unlock(&ctx->cs_lock);
1001 spin_unlock(&ctx->cs_lock);
1002 kfree(cs->jobs_in_queue_cnt);
1011 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
1013 struct hl_cs_job *job, *tmp;
1015 staged_cs_put(hdev, cs);
1017 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1018 hl_complete_job(hdev, job);
1022 * release_reserved_encaps_signals() - release reserved encapsulated signals.
1023 * @hdev: pointer to habanalabs device structure
1025 * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
1026 * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
1027 * For these signals we also need to put the refcount of the H/W SOB which was taken at the
1030 static void release_reserved_encaps_signals(struct hl_device *hdev)
1032 struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
1033 struct hl_cs_encaps_sig_handle *handle;
1034 struct hl_encaps_signals_mgr *mgr;
1040 mgr = &ctx->sig_mgr;
1042 idr_for_each_entry(&mgr->handles, handle, id)
1043 if (handle->cs_seq == ULLONG_MAX)
1044 kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
1049 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
1052 struct hl_cs *cs, *tmp;
1054 if (!skip_wq_flush) {
1055 flush_workqueue(hdev->ts_free_obj_wq);
1057 /* flush all completions before iterating over the CS mirror list in
1058 * order to avoid a race with the release functions
1060 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1061 flush_workqueue(hdev->cq_wq[i]);
1063 flush_workqueue(hdev->cs_cmplt_wq);
1066 /* Make sure we don't have leftovers in the CS mirror list */
1067 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
1070 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
1071 cs->ctx->asid, cs->sequence);
1072 cs_rollback(hdev, cs);
1076 force_complete_multi_cs(hdev);
1078 release_reserved_encaps_signals(hdev);
1082 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
1084 struct hl_user_pending_interrupt *pend, *temp;
1085 unsigned long flags;
1087 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
1088 list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
1089 if (pend->ts_reg_info.buf) {
1090 list_del(&pend->wait_list_node);
1091 hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
1092 hl_cb_put(pend->ts_reg_info.cq_cb);
1094 pend->fence.error = -EIO;
1095 complete_all(&pend->fence.completion);
1098 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
1101 void hl_release_pending_user_interrupts(struct hl_device *hdev)
1103 struct asic_fixed_properties *prop = &hdev->asic_prop;
1104 struct hl_user_interrupt *interrupt;
1107 if (!prop->user_interrupt_count)
1110 /* We iterate through the user interrupt requests and wake up all
1111 * user threads waiting for interrupt completion. We iterate the
1112 * list under a lock; this is why all user threads, once awake,
1113 * will wait on the same lock and will release the waiting object upon
1117 for (i = 0 ; i < prop->user_interrupt_count ; i++) {
1118 interrupt = &hdev->user_interrupt[i];
1119 wake_pending_user_interrupt_threads(interrupt);
1122 interrupt = &hdev->common_user_cq_interrupt;
1123 wake_pending_user_interrupt_threads(interrupt);
1125 interrupt = &hdev->common_decoder_interrupt;
1126 wake_pending_user_interrupt_threads(interrupt);
1129 static void force_complete_cs(struct hl_device *hdev)
1133 spin_lock(&hdev->cs_mirror_lock);
1135 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
1136 cs->fence->error = -EIO;
1137 complete_all(&cs->fence->completion);
1140 spin_unlock(&hdev->cs_mirror_lock);
1143 void hl_abort_waitings_for_completion(struct hl_device *hdev)
1145 force_complete_cs(hdev);
1146 force_complete_multi_cs(hdev);
1147 hl_release_pending_user_interrupts(hdev);
1150 static void job_wq_completion(struct work_struct *work)
1152 struct hl_cs_job *job = container_of(work, struct hl_cs_job,
1154 struct hl_cs *cs = job->cs;
1155 struct hl_device *hdev = cs->ctx->hdev;
1157 /* job is no longer needed */
1158 hl_complete_job(hdev, job);
1161 static void cs_completion(struct work_struct *work)
1163 struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
1164 struct hl_device *hdev = cs->ctx->hdev;
1165 struct hl_cs_job *job, *tmp;
1167 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1168 hl_complete_job(hdev, job);
1171 u32 hl_get_active_cs_num(struct hl_device *hdev)
1173 u32 active_cs_num = 0;
1176 spin_lock(&hdev->cs_mirror_lock);
1178 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
1182 spin_unlock(&hdev->cs_mirror_lock);
1184 return active_cs_num;
1187 static int validate_queue_index(struct hl_device *hdev,
1188 struct hl_cs_chunk *chunk,
1189 enum hl_queue_type *queue_type,
1190 bool *is_kernel_allocated_cb)
1192 struct asic_fixed_properties *asic = &hdev->asic_prop;
1193 struct hw_queue_properties *hw_queue_prop;
1195 /* This must be checked here to prevent out-of-bounds access to
1196 * hw_queues_props array
1198 if (chunk->queue_index >= asic->max_queues) {
1199 dev_err(hdev->dev, "Queue index %d is invalid\n",
1200 chunk->queue_index);
1204 hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
1206 if (hw_queue_prop->type == QUEUE_TYPE_NA) {
1207 dev_err(hdev->dev, "Queue index %d is not applicable\n",
1208 chunk->queue_index);
1212 if (hw_queue_prop->binned) {
1213 dev_err(hdev->dev, "Queue index %d is binned out\n",
1214 chunk->queue_index);
1218 if (hw_queue_prop->driver_only) {
1220 "Queue index %d is restricted for the kernel driver\n",
1221 chunk->queue_index);
1225 /* When hw queue type isn't QUEUE_TYPE_HW,
1226 * USER_ALLOC_CB flag shall be treated as "don't care".
1228 if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1229 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1230 if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1232 "Queue index %d doesn't support user CB\n",
1233 chunk->queue_index);
1237 *is_kernel_allocated_cb = false;
1239 if (!(hw_queue_prop->cb_alloc_flags &
1242 "Queue index %d doesn't support kernel CB\n",
1243 chunk->queue_index);
1247 *is_kernel_allocated_cb = true;
1250 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1254 *queue_type = hw_queue_prop->type;
1258 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1259 struct hl_mem_mgr *mmg,
1260 struct hl_cs_chunk *chunk)
1264 cb = hl_cb_get(mmg, chunk->cb_handle);
1266 dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
1270 if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1271 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1275 atomic_inc(&cb->cs_cnt);
1284 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1285 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1287 struct hl_cs_job *job;
1289 job = kzalloc(sizeof(*job), GFP_ATOMIC);
1291 job = kzalloc(sizeof(*job), GFP_KERNEL);
1296 kref_init(&job->refcount);
1297 job->queue_type = queue_type;
1298 job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1300 if (is_cb_patched(hdev, job))
1301 INIT_LIST_HEAD(&job->userptr_list);
1303 if (job->queue_type == QUEUE_TYPE_EXT)
1304 INIT_WORK(&job->finish_work, job_wq_completion);
1309 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1311 if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1312 return CS_TYPE_SIGNAL;
1313 else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1314 return CS_TYPE_WAIT;
1315 else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1316 return CS_TYPE_COLLECTIVE_WAIT;
1317 else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1318 return CS_RESERVE_SIGNALS;
1319 else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1320 return CS_UNRESERVE_SIGNALS;
1321 else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
1322 return CS_TYPE_ENGINE_CORE;
1323 else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
1324 return CS_TYPE_FLUSH_PCI_HBW_WRITES;
1326 return CS_TYPE_DEFAULT;
1329 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1331 struct hl_device *hdev = hpriv->hdev;
1332 struct hl_ctx *ctx = hpriv->ctx;
1333 u32 cs_type_flags, num_chunks;
1334 enum hl_device_status status;
1335 enum hl_cs_type cs_type;
1336 bool is_sync_stream;
1339 for (i = 0 ; i < sizeof(args->in.pad) ; i++)
1340 if (args->in.pad[i]) {
1341 dev_dbg(hdev->dev, "Padding bytes must be 0\n");
1345 if (!hl_device_operational(hdev, &status)) {
1349 if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1350 !hdev->supports_staged_submission) {
1351 dev_err(hdev->dev, "staged submission not supported");
1355 cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1357 if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1359 "CS type flags are mutually exclusive, context %d\n",
1364 cs_type = hl_cs_get_cs_type(cs_type_flags);
1365 num_chunks = args->in.num_chunks_execute;
1367 is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
1368 cs_type == CS_TYPE_COLLECTIVE_WAIT);
1370 if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
1371 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1375 if (cs_type == CS_TYPE_DEFAULT) {
1377 dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
1380 } else if (is_sync_stream && num_chunks != 1) {
1382 "Sync stream CS mandates one chunk only, context %d\n",
1390 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1391 struct hl_cs_chunk **cs_chunk_array,
1392 void __user *chunks, u32 num_chunks,
1397 if (num_chunks > HL_MAX_JOBS_PER_CS) {
1398 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1399 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1401 "Number of chunks can NOT be larger than %d\n",
1402 HL_MAX_JOBS_PER_CS);
1406 *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1408 if (!*cs_chunk_array)
1409 *cs_chunk_array = kmalloc_array(num_chunks,
1410 sizeof(**cs_chunk_array), GFP_KERNEL);
1411 if (!*cs_chunk_array) {
1412 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1413 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1417 size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1418 if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1419 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1420 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1421 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1422 kfree(*cs_chunk_array);
1429 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1430 u64 sequence, u32 flags,
1431 u32 encaps_signal_handle)
1433 if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1436 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1437 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1439 if (cs->staged_first) {
1440 /* Staged CS sequence is the first CS sequence */
1441 INIT_LIST_HEAD(&cs->staged_cs_node);
1442 cs->staged_sequence = cs->sequence;
1444 if (cs->encaps_signals)
1445 cs->encaps_sig_hdl_id = encaps_signal_handle;
1447 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1448 * under the cs_mirror_lock
1450 cs->staged_sequence = sequence;
1453 /* Increment CS reference if needed */
1454 staged_cs_get(hdev, cs);
1456 cs->staged_cs = true;
1461 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1465 for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1466 if (qid == hdev->stream_master_qid_arr[i])
1472 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1473 u32 num_chunks, u64 *cs_seq, u32 flags,
1474 u32 encaps_signals_handle, u32 timeout,
1475 u16 *signal_initial_sob_count)
1477 bool staged_mid, int_queues_only = true, using_hw_queues = false;
1478 struct hl_device *hdev = hpriv->hdev;
1479 struct hl_cs_chunk *cs_chunk_array;
1480 struct hl_cs_counters_atomic *cntr;
1481 struct hl_ctx *ctx = hpriv->ctx;
1482 struct hl_cs_job *job;
1486 u8 stream_master_qid_map = 0;
1489 cntr = &hdev->aggregated_cs_counters;
1490 user_sequence = *cs_seq;
1491 *cs_seq = ULLONG_MAX;
1493 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1498 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1499 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1504 rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1505 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1508 goto free_cs_chunk_array;
1510 *cs_seq = cs->sequence;
1512 hl_debugfs_add_cs(cs);
1514 rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1515 encaps_signals_handle);
1517 goto free_cs_object;
1519 /* If this is a staged submission we must return the staged sequence
1520 * rather than the internal CS sequence
1523 *cs_seq = cs->staged_sequence;
1525 /* Validate ALL the CS chunks before submitting the CS */
1526 for (i = 0 ; i < num_chunks ; i++) {
1527 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1528 enum hl_queue_type queue_type;
1529 bool is_kernel_allocated_cb;
1531 rc = validate_queue_index(hdev, chunk, &queue_type,
1532 &is_kernel_allocated_cb);
1534 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1535 atomic64_inc(&cntr->validation_drop_cnt);
1536 goto free_cs_object;
1539 if (is_kernel_allocated_cb) {
1540 cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
1543 &ctx->cs_counters.validation_drop_cnt);
1544 atomic64_inc(&cntr->validation_drop_cnt);
1546 goto free_cs_object;
1549 cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1552 if (queue_type == QUEUE_TYPE_EXT ||
1553 queue_type == QUEUE_TYPE_HW) {
1554 int_queues_only = false;
1557 * store which streams are being used for external/HW
1560 if (hdev->supports_wait_for_multi_cs)
1561 stream_master_qid_map |=
1562 get_stream_master_qid_mask(hdev,
1563 chunk->queue_index);
1566 if (queue_type == QUEUE_TYPE_HW)
1567 using_hw_queues = true;
1569 job = hl_cs_allocate_job(hdev, queue_type,
1570 is_kernel_allocated_cb);
1572 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1573 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1574 dev_err(hdev->dev, "Failed to allocate a new job\n");
1576 if (is_kernel_allocated_cb)
1579 goto free_cs_object;
1585 job->user_cb_size = chunk->cb_size;
1586 job->hw_queue_id = chunk->queue_index;
1588 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1591 list_add_tail(&job->cs_node, &cs->job_list);
1594 * Increment CS reference. When CS reference is 0, CS is
1595 * done and can be signaled to the user and all its resources freed.
1596 * Only increment for JOBs on external or H/W queues, because
1597 * only for those JOBs do we get a completion
1599 if (cs_needs_completion(cs) &&
1600 (job->queue_type == QUEUE_TYPE_EXT ||
1601 job->queue_type == QUEUE_TYPE_HW))
1604 hl_debugfs_add_job(hdev, job);
1606 rc = cs_parser(hpriv, job);
1608 atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1609 atomic64_inc(&cntr->parsing_drop_cnt);
1611 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1612 cs->ctx->asid, cs->sequence, job->id, rc);
1613 goto free_cs_object;
1617 /* We allow a CS with any queue type combination as long as it does
1618 * not get a completion
1620 if (int_queues_only && cs_needs_completion(cs)) {
1621 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1622 atomic64_inc(&cntr->validation_drop_cnt);
1624 "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1625 cs->ctx->asid, cs->sequence);
1627 goto free_cs_object;
1630 if (using_hw_queues)
1631 INIT_WORK(&cs->finish_work, cs_completion);
1634 * store the (external/HW queues) streams used by the CS in the
1635 * fence object for multi-CS completion
1637 if (hdev->supports_wait_for_multi_cs)
1638 cs->fence->stream_master_qid_map = stream_master_qid_map;
1640 rc = hl_hw_queue_schedule_cs(cs);
1644 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1645 cs->ctx->asid, cs->sequence, rc);
1646 goto free_cs_object;
1649 *signal_initial_sob_count = cs->initial_sob_count;
1651 rc = HL_CS_STATUS_SUCCESS;
1655 atomic_dec(&cb->cs_cnt);
1658 cs_rollback(hdev, cs);
1659 *cs_seq = ULLONG_MAX;
1660 /* The path below is both for good and erroneous exits */
1662 /* We finished with the CS in this function, so put the ref */
1664 free_cs_chunk_array:
1665 kfree(cs_chunk_array);
1670 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1673 struct hl_device *hdev = hpriv->hdev;
1674 struct hl_ctx *ctx = hpriv->ctx;
1675 bool need_soft_reset = false;
1676 int rc = 0, do_ctx_switch = 0;
1677 void __user *chunks;
1678 u32 num_chunks, tmp;
1682 if (hdev->supports_ctx_switch)
1683 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1685 if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1686 mutex_lock(&hpriv->restore_phase_mutex);
1688 if (do_ctx_switch) {
1689 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1691 dev_err_ratelimited(hdev->dev,
1692 "Failed to switch to context %d, rejecting CS! %d\n",
1695 * If we timed out, or if the device is not IDLE
1696 * while we want to do context-switch (-EBUSY),
1697 * we need to soft-reset because QMAN is
1698 * probably stuck. However, we can't call
1699 * reset here directly because of a deadlock, so
1700 * we need to do it at the very end of this
1703 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1704 need_soft_reset = true;
1705 mutex_unlock(&hpriv->restore_phase_mutex);
1710 hdev->asic_funcs->restore_phase_topology(hdev);
1712 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1713 num_chunks = args->in.num_chunks_restore;
1717 "Need to run restore phase but restore CS is empty\n");
1720 rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1721 cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
1724 mutex_unlock(&hpriv->restore_phase_mutex);
1728 "Failed to submit restore CS for context %d (%d)\n",
1733 /* Need to wait for restore completion before execution phase */
1735 enum hl_cs_wait_status status;
1737 ret = _hl_cs_wait_ioctl(hdev, ctx,
1738 jiffies_to_usecs(hdev->timeout_jiffies),
1739 *cs_seq, &status, NULL);
1741 if (ret == -ERESTARTSYS) {
1742 usleep_range(100, 200);
1747 "Restore CS for context %d failed to complete %d\n",
1754 if (hdev->supports_ctx_switch)
1755 ctx->thread_ctx_switch_wait_token = 1;
1757 } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
1758 rc = hl_poll_timeout_memory(hdev,
1759 &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1760 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1762 if (rc == -ETIMEDOUT) {
1764 "context switch phase timeout (%d)\n", tmp);
1770 if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1771 hl_device_reset(hdev, 0);
1777 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
1778 * If the SOB value reaches the max value, move to the other SOB reserved
1780 * @hdev: pointer to device structure
1781 * @q_idx: stream queue index
1782 * @hw_sob: the H/W SOB used in this signal CS.
1783 * @count: signals count
1784 * @encaps_sig: tells whether it's reservation for encaps signals or not.
1786 * Note that this function must be called while hw_queues_lock is taken.
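 *
 * Illustrative example: when prop->next_sob_val + count would reach
 * HL_MAX_SOB_VAL, the handler switches to the other reserved SOB of the
 * stream (curr_sob_offset + 1 modulo HL_RSVD_SOBS) and restarts the value at
 * count + 1 for an encaps signals reservation, or at count otherwise.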
1788 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1789 struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1792 struct hl_sync_stream_properties *prop;
1793 struct hl_hw_sob *sob = *hw_sob, *other_sob;
1794 u8 other_sob_offset;
1796 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1800 /* check for wraparound */
1801 if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1803 * Decrement as we reached the max value.
1804 * The release function won't be called here as we've
1805 * just incremented the refcount right before calling this
1808 hw_sob_put_err(sob);
1811 * check the other sob value, if it is still in use then fail,
1812 * otherwise make the switch
1814 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1815 other_sob = &prop->hw_sob[other_sob_offset];
1817 if (kref_read(&other_sob->kref) != 1) {
1818 dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1824 * next_sob_val always points to the next available signal
1825 * in the sob, so in encaps signals it will be the next one
1826 * after reserving the required amount.
1829 prop->next_sob_val = count + 1;
1831 prop->next_sob_val = count;
1833 /* only two SOBs are currently in use */
1834 prop->curr_sob_offset = other_sob_offset;
1835 *hw_sob = other_sob;
1838 * Check if other_sob needs a reset, then do it before using it
1839 * for the reservation or the next signal cs.
1840 * We do it here, for both the encaps and regular signal cs
1841 * cases, in order to avoid possible races of two kref_put
1842 * of the sob which can occur at the same time if we move the
1843 * sob reset (kref_put) to the cs_do_release function.
1844 * In addition, if we have a combination of cs signal and
1845 * encaps, and at the point we need to reset the sob there are
1846 * no more reservations and only signal cs keep coming,
1847 * in such a case we need signal_cs to put the refcount and
1850 if (other_sob->need_reset)
1851 hw_sob_put(other_sob);
1854 /* set reset indication for the sob */
1855 sob->need_reset = true;
1856 hw_sob_get(other_sob);
1859 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1860 prop->curr_sob_offset, q_idx);
1862 prop->next_sob_val += count;
1868 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1869 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1870 bool encaps_signals)
1872 u64 *signal_seq_arr = NULL;
1873 u32 size_to_copy, signal_seq_arr_len;
1876 if (encaps_signals) {
1877 *signal_seq = chunk->encaps_signal_seq;
1881 signal_seq_arr_len = chunk->num_signal_seq_arr;
1883 /* currently only one signal seq is supported */
1884 if (signal_seq_arr_len != 1) {
1885 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1886 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1888 "Wait for signal CS supports only one signal CS seq\n");
1892 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1893 sizeof(*signal_seq_arr),
1895 if (!signal_seq_arr)
1896 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1897 sizeof(*signal_seq_arr),
1899 if (!signal_seq_arr) {
1900 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1901 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1905 size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1906 if (copy_from_user(signal_seq_arr,
1907 u64_to_user_ptr(chunk->signal_seq_arr),
1909 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1910 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1912 "Failed to copy signal seq array from user\n");
1917 /* currently it is guaranteed to have only one signal seq */
1918 *signal_seq = signal_seq_arr[0];
1921 kfree(signal_seq_arr);
1926 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1927 struct hl_ctx *ctx, struct hl_cs *cs,
1928 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1930 struct hl_cs_counters_atomic *cntr;
1931 struct hl_cs_job *job;
1935 cntr = &hdev->aggregated_cs_counters;
1937 job = hl_cs_allocate_job(hdev, q_type, true);
1939 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1940 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1941 dev_err(hdev->dev, "Failed to allocate a new job\n");
1945 if (cs->type == CS_TYPE_WAIT)
1946 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1948 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1950 cb = hl_cb_kernel_create(hdev, cb_size,
1951 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1953 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1954 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1962 atomic_inc(&job->user_cb->cs_cnt);
1963 job->user_cb_size = cb_size;
1964 job->hw_queue_id = q_idx;
1966 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1967 && cs->encaps_signals)
1968 job->encaps_sig_wait_offset = encaps_signal_offset;
1970 * No need for parsing, the user CB is the patched CB.
1971 * We call hl_cb_destroy() for two reasons - we don't need the CB in
1972 * the CB idr anymore, and to decrement its refcount as it was
1973 * incremented inside hl_cb_kernel_create().
1975 job->patched_cb = job->user_cb;
1976 job->job_cb_size = job->user_cb_size;
1977 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
1979 /* increment refcount as for external queues we get completion */
1982 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1985 list_add_tail(&job->cs_node, &cs->job_list);
1987 hl_debugfs_add_job(hdev, job);
1992 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1993 u32 q_idx, u32 count,
1994 u32 *handle_id, u32 *sob_addr,
1997 struct hw_queue_properties *hw_queue_prop;
1998 struct hl_sync_stream_properties *prop;
1999 struct hl_device *hdev = hpriv->hdev;
2000 struct hl_cs_encaps_sig_handle *handle;
2001 struct hl_encaps_signals_mgr *mgr;
2002 struct hl_hw_sob *hw_sob;
2006 if (count >= HL_MAX_SOB_VAL) {
2007 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
2013 if (q_idx >= hdev->asic_prop.max_queues) {
2014 dev_err(hdev->dev, "Queue index %d is invalid\n",
2020 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2022 if (!hw_queue_prop->supports_sync_stream) {
2024 "Queue index %d does not support sync stream operations\n",
2030 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2032 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
2038 handle->count = count;
2040 hl_ctx_get(hpriv->ctx);
2041 handle->ctx = hpriv->ctx;
2042 mgr = &hpriv->ctx->sig_mgr;
2044 spin_lock(&mgr->lock);
2045 hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
2046 spin_unlock(&mgr->lock);
2049 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
2054 handle->id = hdl_id;
2055 handle->q_idx = q_idx;
2056 handle->hdev = hdev;
2057 kref_init(&handle->refcount);
2059 hdev->asic_funcs->hw_queues_lock(hdev);
2061 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2064 * Increment the SOB value by count per the user request
2065 * to reserve those signals.
2066 * Check that the amount of signals to reserve does not exceed the max sob
2067 * value; if it does, switch sob.
2069 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
2072 dev_err(hdev->dev, "Failed to switch SOB\n");
2073 hdev->asic_funcs->hw_queues_unlock(hdev);
2077 /* set the hw_sob to the handle after calling the sob wraparound handler
2078 * since sob could have changed.
2080 handle->hw_sob = hw_sob;
2082 /* store the current sob value for unreserve validity check, and
2083 * signal offset support
2085 handle->pre_sob_val = prop->next_sob_val - handle->count;
2087 handle->cs_seq = ULLONG_MAX;
2089 *signals_count = prop->next_sob_val;
2090 hdev->asic_funcs->hw_queues_unlock(hdev);
2092 *sob_addr = handle->hw_sob->sob_addr;
2093 *handle_id = hdl_id;
2096 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
2097 hw_sob->sob_id, handle->hw_sob->sob_addr,
2098 prop->next_sob_val - 1, q_idx, hdl_id);
2102 spin_lock(&mgr->lock);
2103 idr_remove(&mgr->handles, hdl_id);
2104 spin_unlock(&mgr->lock);
2107 hl_ctx_put(handle->ctx);
2114 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
2116 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
2117 struct hl_sync_stream_properties *prop;
2118 struct hl_device *hdev = hpriv->hdev;
2119 struct hl_encaps_signals_mgr *mgr;
2120 struct hl_hw_sob *hw_sob;
2121 u32 q_idx, sob_addr;
2124 mgr = &hpriv->ctx->sig_mgr;
2126 spin_lock(&mgr->lock);
2127 encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
2128 if (encaps_sig_hdl) {
2129 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
2130 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
2131 encaps_sig_hdl->count);
2133 hdev->asic_funcs->hw_queues_lock(hdev);
2135 q_idx = encaps_sig_hdl->q_idx;
2136 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2137 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2138 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
2140 /* Check if sob_val got out of sync due to other
2141 * signal submission requests which were handled
2142 * between the reserve-unreserve calls or SOB switch
2143 * upon reaching SOB max value.
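 * Illustrative example: a reservation of 10 signals made at pre_sob_val 5
 * can only be unreserved while next_sob_val is still 15 and the handle's SOB
 * address still matches the currently active SOB.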
2145 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
2146 != prop->next_sob_val ||
2147 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
2148 dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
2149 encaps_sig_hdl->pre_sob_val,
2150 (prop->next_sob_val - encaps_sig_hdl->count));
2152 hdev->asic_funcs->hw_queues_unlock(hdev);
2158 * Decrement the SOB value by count per the user request
2159 * to unreserve those signals
2161 prop->next_sob_val -= encaps_sig_hdl->count;
2163 hdev->asic_funcs->hw_queues_unlock(hdev);
2167 /* Release the id and free allocated memory of the handle */
2168 idr_remove(&mgr->handles, handle_id);
2169 hl_ctx_put(encaps_sig_hdl->ctx);
2170 kfree(encaps_sig_hdl);
2173 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
2176 spin_unlock(&mgr->lock);
2181 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
2182 void __user *chunks, u32 num_chunks,
2183 u64 *cs_seq, u32 flags, u32 timeout,
2184 u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
2186 struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
2187 bool handle_found = false, is_wait_cs = false,
2188 wait_cs_submitted = false,
2189 cs_encaps_signals = false;
2190 struct hl_cs_chunk *cs_chunk_array, *chunk;
2191 bool staged_cs_with_encaps_signals = false;
2192 struct hw_queue_properties *hw_queue_prop;
2193 struct hl_device *hdev = hpriv->hdev;
2194 struct hl_cs_compl *sig_waitcs_cmpl;
2195 u32 q_idx, collective_engine_id = 0;
2196 struct hl_cs_counters_atomic *cntr;
2197 struct hl_fence *sig_fence = NULL;
2198 struct hl_ctx *ctx = hpriv->ctx;
2199 enum hl_queue_type q_type;
2204 cntr = &hdev->aggregated_cs_counters;
2205 *cs_seq = ULLONG_MAX;
2207 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
2212 /* currently it is guaranteed to have only one chunk */
2213 chunk = &cs_chunk_array[0];
2215 if (chunk->queue_index >= hdev->asic_prop.max_queues) {
2216 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2217 atomic64_inc(&cntr->validation_drop_cnt);
2218 dev_err(hdev->dev, "Queue index %d is invalid\n",
2219 chunk->queue_index);
2221 goto free_cs_chunk_array;
2224 q_idx = chunk->queue_index;
2225 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2226 q_type = hw_queue_prop->type;
2228 if (!hw_queue_prop->supports_sync_stream) {
2229 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2230 atomic64_inc(&cntr->validation_drop_cnt);
2232 "Queue index %d does not support sync stream operations\n",
2235 goto free_cs_chunk_array;
2238 if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
2239 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
2240 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2241 atomic64_inc(&cntr->validation_drop_cnt);
2243 "Queue index %d is invalid\n", q_idx);
2245 goto free_cs_chunk_array;
2248 if (!hdev->nic_ports_mask) {
2249 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2250 atomic64_inc(&cntr->validation_drop_cnt);
2252 "Collective operations not supported when NIC ports are disabled");
2254 goto free_cs_chunk_array;
2257 collective_engine_id = chunk->collective_engine_id;
2260 is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2261 cs_type == CS_TYPE_COLLECTIVE_WAIT);
2263 cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2266 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2267 ctx, cs_encaps_signals);
2269 goto free_cs_chunk_array;
2271 if (cs_encaps_signals) {
		/* check if cs sequence has encapsulated
		 * signals handle
		 */
2278 spin_lock(&ctx->sig_mgr.lock);
2279 idp = &ctx->sig_mgr.handles;
2280 idr_for_each_entry(idp, encaps_sig_hdl, id) {
2281 if (encaps_sig_hdl->cs_seq == signal_seq) {
2282 /* get refcount to protect removing this handle from idr,
2283 * needed when multiple wait cs are used with offset
2284 * to wait on reserved encaps signals.
2285 * Since kref_put of this handle is executed outside the
2286 * current lock, it is possible that the handle refcount
			 * is 0 but it has yet to be removed from the list. In this
			 * case we need to consider the handle as not valid.
			 */
2290 if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
2291 handle_found = true;
2295 spin_unlock(&ctx->sig_mgr.lock);
2297 if (!handle_found) {
2298 /* treat as signal CS already finished */
2299 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2302 goto free_cs_chunk_array;
2305 /* validate also the signal offset value */
2306 if (chunk->encaps_signal_offset >
2307 encaps_sig_hdl->count) {
2308 dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
2309 chunk->encaps_signal_offset,
2310 encaps_sig_hdl->count);
2312 goto free_cs_chunk_array;
2316 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2317 if (IS_ERR(sig_fence)) {
2318 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2319 atomic64_inc(&cntr->validation_drop_cnt);
2321 "Failed to get signal CS with seq 0x%llx\n",
2323 rc = PTR_ERR(sig_fence);
2324 goto free_cs_chunk_array;
2328 /* signal CS already finished */
2330 goto free_cs_chunk_array;
2334 container_of(sig_fence, struct hl_cs_compl, base_fence);
2336 staged_cs_with_encaps_signals = !!
2337 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2338 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2340 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2341 !staged_cs_with_encaps_signals) {
2342 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2343 atomic64_inc(&cntr->validation_drop_cnt);
2345 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2347 hl_fence_put(sig_fence);
2349 goto free_cs_chunk_array;
2352 if (completion_done(&sig_fence->completion)) {
2353 /* signal CS already finished */
2354 hl_fence_put(sig_fence);
2356 goto free_cs_chunk_array;
2360 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2363 hl_fence_put(sig_fence);
2365 goto free_cs_chunk_array;
2369 * Save the signal CS fence for later initialization right before
2370 * hanging the wait CS on the queue.
	 * For the encaps signals case, we save the cs sequence and handle pointer
	 * for later initialization.
	 */
2375 cs->signal_fence = sig_fence;
2376 /* store the handle pointer, so we don't have to
	 * look for it again later on in the flow,
	 * when we need to set SOB info in hw_queue.
	 */
2380 if (cs->encaps_signals)
2381 cs->encaps_sig_hdl = encaps_sig_hdl;
2384 hl_debugfs_add_cs(cs);
2386 *cs_seq = cs->sequence;
2388 if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2389 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2390 q_idx, chunk->encaps_signal_offset);
2391 else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2392 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2393 cs, q_idx, collective_engine_id,
2394 chunk->encaps_signal_offset);
2396 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2397 atomic64_inc(&cntr->validation_drop_cnt);
2402 goto free_cs_object;
2404 if (q_type == QUEUE_TYPE_HW)
2405 INIT_WORK(&cs->finish_work, cs_completion);
2407 rc = hl_hw_queue_schedule_cs(cs);
2409 /* In case wait cs failed here, it means the signal cs
		 * already completed. We want to free all its related objects
		 * but we don't want to fail the ioctl.
		 */
2415 else if (rc != -EAGAIN)
2417 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2418 ctx->asid, cs->sequence, rc);
2419 goto free_cs_object;
2422 *signal_sob_addr_offset = cs->sob_addr_offset;
2423 *signal_initial_sob_count = cs->initial_sob_count;
2425 rc = HL_CS_STATUS_SUCCESS;
2427 wait_cs_submitted = true;
2431 cs_rollback(hdev, cs);
2432 *cs_seq = ULLONG_MAX;
2433 /* The path below is both for good and erroneous exits */
2435 /* We finished with the CS in this function, so put the ref */
2437 free_cs_chunk_array:
2438 if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
2439 kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
2440 kfree(cs_chunk_array);
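/*
 * cs_ioctl_engine_cores() - run or halt a set of engine cores.
 *
 * Copies the user-supplied array of core IDs and passes it to the ASIC
 * specific handler together with the requested command (run or halt).
 */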
2445 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
2446 u32 num_engine_cores, u32 core_command)
2449 struct hl_device *hdev = hpriv->hdev;
2450 void __user *engine_cores_arr;
2453 if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
2454 dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
2458 if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
2459 dev_err(hdev->dev, "Engine core command is invalid\n");
2463 engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
2464 cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
2468 if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
2469 dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
2474 rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
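/*
 * cs_ioctl_flush_pci_hbw_writes() - flush outstanding PCI high-bandwidth writes.
 *
 * Reading back the dedicated flush register forces previously posted HBW
 * writes on that path to complete before the ioctl returns. If the ASIC does
 * not expose such a register, the flush is not supported.
 */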
2480 static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
2482 struct hl_device *hdev = hpriv->hdev;
2483 struct asic_fixed_properties *prop = &hdev->asic_prop;
2485 if (!prop->hbw_flush_reg) {
2486 dev_dbg(hdev->dev, "HBW flush is not supported\n");
2490 RREG32(prop->hbw_flush_reg);
2495 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2497 union hl_cs_args *args = data;
2498 enum hl_cs_type cs_type = 0;
2499 u64 cs_seq = ULONG_MAX;
2500 void __user *chunks;
2501 u32 num_chunks, flags, timeout,
2502 signals_count = 0, sob_addr = 0, handle_id = 0;
2503 u16 sob_initial_count = 0;
2506 rc = hl_cs_sanity_checks(hpriv, args);
2510 rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2514 cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2515 ~HL_CS_FLAGS_FORCE_RESTORE);
2516 chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2517 num_chunks = args->in.num_chunks_execute;
2518 flags = args->in.cs_flags;
2520 /* In case this is a staged CS, user should supply the CS sequence */
2521 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2522 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2523 cs_seq = args->in.seq;
2525 timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2526 ? msecs_to_jiffies(args->in.timeout * 1000)
2527 : hpriv->hdev->timeout_jiffies;
2530 case CS_TYPE_SIGNAL:
2532 case CS_TYPE_COLLECTIVE_WAIT:
2533 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2534 &cs_seq, args->in.cs_flags, timeout,
2535 &sob_addr, &sob_initial_count);
2537 case CS_RESERVE_SIGNALS:
2538 rc = cs_ioctl_reserve_signals(hpriv,
2539 args->in.encaps_signals_q_idx,
2540 args->in.encaps_signals_count,
2541 &handle_id, &sob_addr, &signals_count);
2543 case CS_UNRESERVE_SIGNALS:
2544 rc = cs_ioctl_unreserve_signals(hpriv,
2545 args->in.encaps_sig_handle_id);
2547 case CS_TYPE_ENGINE_CORE:
2548 rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
2549 args->in.num_engine_cores, args->in.core_command);
2551 case CS_TYPE_FLUSH_PCI_HBW_WRITES:
2552 rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
2555 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2557 args->in.encaps_sig_handle_id,
2558 timeout, &sob_initial_count);
2562 if (rc != -EAGAIN) {
2563 memset(args, 0, sizeof(*args));
2566 case CS_RESERVE_SIGNALS:
2567 args->out.handle_id = handle_id;
2568 args->out.sob_base_addr_offset = sob_addr;
2569 args->out.count = signals_count;
2571 case CS_TYPE_SIGNAL:
2572 args->out.sob_base_addr_offset = sob_addr;
2573 args->out.sob_count_before_submission = sob_initial_count;
2574 args->out.seq = cs_seq;
2576 case CS_TYPE_DEFAULT:
2577 args->out.sob_count_before_submission = sob_initial_count;
2578 args->out.seq = cs_seq;
2581 args->out.seq = cs_seq;
2585 args->out.status = rc;
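/*
 * hl_wait_for_fence() - wait for a single CS fence and report its status.
 *
 * A NULL fence means the CS is old enough for its fence to have been released
 * already; in that case the outcome store is consulted and the CS is reported
 * as completed or gone. Otherwise the function waits (up to @timeout_us) on
 * the fence completion and translates the result into a CS wait status, an
 * optional timestamp and an error code.
 */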
2591 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2592 enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
2594 struct hl_device *hdev = ctx->hdev;
2595 ktime_t timestamp_kt;
2599 if (IS_ERR(fence)) {
2600 rc = PTR_ERR(fence);
2602 dev_notice_ratelimited(hdev->dev,
2603 "Can't wait on CS %llu because current CS is at seq %llu\n",
2604 seq, ctx->cs_sequence);
2609 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, ×tamp_kt, &error)) {
2611 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2612 seq, ctx->cs_sequence);
2613 *status = CS_WAIT_STATUS_GONE;
2618 goto report_results;
2622 completion_rc = completion_done(&fence->completion);
2624 unsigned long timeout;
2626 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2627 timeout_us : usecs_to_jiffies(timeout_us);
		completion_rc =
			wait_for_completion_interruptible_timeout(
2630 &fence->completion, timeout);
2633 error = fence->error;
2634 timestamp_kt = fence->timestamp;
2637 if (completion_rc > 0) {
2638 *status = CS_WAIT_STATUS_COMPLETED;
2640 *timestamp = ktime_to_ns(timestamp_kt);
2642 *status = CS_WAIT_STATUS_BUSY;
2645 if (completion_rc == -ERESTARTSYS)
2647 else if (error == -ETIMEDOUT || error == -EIO)
2654 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2656 * @mcs_data: multi-CS internal data
2657 * @mcs_compl: multi-CS completion structure
2659 * @return 0 on success, otherwise non 0 error code
 * The function iterates over all CS sequences in the list and sets a bit in
 * completion_bitmap for each completed CS.
2663 * While iterating, the function sets the stream map of each fence in the fence
2664 * array in the completion QID stream map to be used by CSs to perform
2665 * completion to the multi-CS context.
2666 * This function shall be called after taking context ref
2668 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
2670 struct hl_fence **fence_ptr = mcs_data->fence_arr;
2671 struct hl_device *hdev = mcs_data->ctx->hdev;
2672 int i, rc, arr_len = mcs_data->arr_len;
2673 u64 *seq_arr = mcs_data->seq_arr;
2674 ktime_t max_ktime, first_cs_time;
2675 enum hl_cs_wait_status status;
2677 memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
2679 /* get all fences under the same lock */
2680 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2685 * re-initialize the completion here to handle 2 possible cases:
2686 * 1. CS will complete the multi-CS prior clearing the completion. in which
2687 * case the fence iteration is guaranteed to catch the CS completion.
2688 * 2. the completion will occur after re-init of the completion.
2689 * in which case we will wake up immediately in wait_for_completion.
2691 reinit_completion(&mcs_compl->completion);
2694 * set to maximum time to verify timestamp is valid: if at the end
	 * this value is maintained, no timestamp was updated
	 */
2697 max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2698 first_cs_time = max_ktime;
2700 for (i = 0; i < arr_len; i++, fence_ptr++) {
2701 struct hl_fence *fence = *fence_ptr;
		 * In order to prevent the case where we wait until timeout even though a CS associated
		 * with the multi-CS actually completed, we do things in the below order:
		 * 1. for each fence, set its QID map in the multi-CS completion QID map. This way
2707 * any CS can, potentially, complete the multi CS for the specific QID (note
2708 * that once completion is initialized, calling complete* and then wait on the
2709 * completion will cause it to return at once)
2710 * 2. only after allowing multi-CS completion for the specific QID we check whether
2711 * the specific CS already completed (and thus the wait for completion part will
		 *    be skipped). If the CS is not completed, it is guaranteed that the completing CS will
		 *    wake up the completion.
		 */
2716 mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
		 * function won't sleep as it is called with timeout 0 (i.e.
		 * poll the fence)
		 */
2722 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
2725 "wait_for_fence error :%d for CS seq %llu\n",
2731 case CS_WAIT_STATUS_BUSY:
			/* CS did not finish, QID to wait on already stored */
2734 case CS_WAIT_STATUS_COMPLETED:
			 * Use mcs_handling_done to avoid the possibility of mcs_data
			 * returning to the user indicating CS completed before it finished
			 * all of its mcs handling, to avoid a race the next time the
2739 * user waits for mcs.
2740 * note: when reaching this case fence is definitely not NULL
			 * but a NULL check was added to satisfy static analysis
			 */
2743 if (fence && !fence->mcs_handling_done) {
				 * in case the multi CS is completed but MCS handling is not done
				 * we "complete" the multi CS to prevent it from waiting
				 * until time-out, and the "multi-CS handling done" will have
				 * another chance at the next iteration
				 */
2750 complete_all(&mcs_compl->completion);
2754 mcs_data->completion_bitmap |= BIT(i);
2756 * For all completed CSs we take the earliest timestamp.
2757 * For this we have to validate that the timestamp is
2758 * earliest of all timestamps so far.
2760 if (fence && mcs_data->update_ts &&
2761 (ktime_compare(fence->timestamp, first_cs_time) < 0))
2762 first_cs_time = fence->timestamp;
2764 case CS_WAIT_STATUS_GONE:
2765 mcs_data->update_ts = false;
2766 mcs_data->gone_cs = true;
			 * It is possible to get old sequence numbers from the user
			 * which relate to already-completed CSs whose fences are
			 * already gone. In this case, the CS is set as completed but
			 * there is no need to consider its QID for mcs completion.
			 */
2773 mcs_data->completion_bitmap |= BIT(i);
2776 dev_err(hdev->dev, "Invalid fence status\n");
2783 hl_fences_put(mcs_data->fence_arr, arr_len);
2785 if (mcs_data->update_ts &&
2786 (ktime_compare(first_cs_time, max_ktime) != 0))
2787 mcs_data->timestamp = ktime_to_ns(first_cs_time);
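/*
 * _hl_cs_wait_ioctl() - wait for a single CS by sequence number.
 *
 * Looks up the fence of @seq in @ctx, waits on it via hl_wait_for_fence()
 * with the requested timeout and then releases the fence reference.
 */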
2792 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
2793 enum hl_cs_wait_status *status, s64 *timestamp)
2795 struct hl_fence *fence;
2803 fence = hl_ctx_get_fence(ctx, seq);
2805 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2806 hl_fence_put(fence);
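/*
 * hl_usecs64_to_jiffies() - convert a 64 bit microseconds value to jiffies.
 *
 * usecs_to_jiffies() takes a 32 bit argument, so values above U32_MAX are
 * converted through nanoseconds instead. For example, a 5 second timeout
 * (5,000,000 us) is converted directly, while a value near U64_MAX is
 * clamped to nsecs_to_jiffies(U64_MAX) to avoid overflowing the
 * multiplication by NSEC_PER_USEC.
 */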
2812 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
2814 if (usecs <= U32_MAX)
2815 return usecs_to_jiffies(usecs);
	/*
	 * If the value in nanoseconds is larger than 64 bit, use the largest
	 * 64 bit value.
	 */
2821 if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
2822 return nsecs_to_jiffies(U64_MAX);
2824 return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
2828 * hl_wait_multi_cs_completion_init - init completion structure
2830 * @hdev: pointer to habanalabs device structure
2831 * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
2832 * master QID to wait on
2834 * @return valid completion struct pointer on success, otherwise error pointer
 * Up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver.
 * The function gets the first available completion (by marking it "used")
 * and initializes its values.
 */
2840 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
2842 struct multi_cs_completion *mcs_compl;
2845 /* find free multi_cs completion structure */
2846 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2847 mcs_compl = &hdev->multi_cs_completion[i];
2848 spin_lock(&mcs_compl->lock);
2849 if (!mcs_compl->used) {
2850 mcs_compl->used = 1;
2851 mcs_compl->timestamp = 0;
2853 * init QID map to 0 to avoid completion by CSs. the actual QID map
2854 * to multi-CS CSs will be set incrementally at a later stage
2856 mcs_compl->stream_master_qid_map = 0;
2857 spin_unlock(&mcs_compl->lock);
2860 spin_unlock(&mcs_compl->lock);
2863 if (i == MULTI_CS_MAX_USER_CTX) {
2864 dev_err(hdev->dev, "no available multi-CS completion structure\n");
2865 return ERR_PTR(-ENOMEM);
 * hl_wait_multi_cs_completion_fini - return completion structure and set as
 *                                    not used
 *
2874 * @mcs_compl: pointer to the completion structure
2876 static void hl_wait_multi_cs_completion_fini(
2877 struct multi_cs_completion *mcs_compl)
2880 * free completion structure, do it under lock to be in-sync with the
2881 * thread that signals completion
2883 spin_lock(&mcs_compl->lock);
2884 mcs_compl->used = 0;
2885 spin_unlock(&mcs_compl->lock);
2889 * hl_wait_multi_cs_completion - wait for first CS to complete
2891 * @mcs_data: multi-CS internal data
2893 * @return 0 on success, otherwise non 0 error code
2895 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
2896 struct multi_cs_completion *mcs_compl)
2900 completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
2901 mcs_data->timeout_jiffies);
2903 /* update timestamp */
2904 if (completion_rc > 0)
2905 mcs_data->timestamp = mcs_compl->timestamp;
2907 if (completion_rc == -ERESTARTSYS)
2908 return completion_rc;
2910 mcs_data->wait_status = completion_rc;
2916 * hl_multi_cs_completion_init - init array of multi-CS completion structures
2918 * @hdev: pointer to habanalabs device structure
2920 void hl_multi_cs_completion_init(struct hl_device *hdev)
2922 struct multi_cs_completion *mcs_cmpl;
2925 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2926 mcs_cmpl = &hdev->multi_cs_completion[i];
2928 spin_lock_init(&mcs_cmpl->lock);
2929 init_completion(&mcs_cmpl->completion);
2934 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2936 * @hpriv: pointer to the private data of the fd
2937 * @data: pointer to multi-CS wait ioctl in/out args
2940 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2942 struct multi_cs_completion *mcs_compl;
2943 struct hl_device *hdev = hpriv->hdev;
2944 struct multi_cs_data mcs_data = {};
2945 union hl_wait_cs_args *args = data;
2946 struct hl_ctx *ctx = hpriv->ctx;
2947 struct hl_fence **fence_arr;
2948 void __user *seq_arr;
2954 for (i = 0 ; i < sizeof(args->in.pad) ; i++)
2955 if (args->in.pad[i]) {
2956 dev_dbg(hdev->dev, "Padding bytes must be 0\n");
2960 if (!hdev->supports_wait_for_multi_cs) {
2961 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2965 seq_arr_len = args->in.seq_arr_len;
2967 if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2968 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2969 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2973 /* allocate memory for sequence array */
2975 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2979 /* copy CS sequence array from user */
2980 seq_arr = (void __user *) (uintptr_t) args->in.seq;
2981 size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2982 if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2983 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2988 /* allocate array for the fences */
2989 fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
2995 /* initialize the multi-CS internal data */
2997 mcs_data.seq_arr = cs_seq_arr;
2998 mcs_data.fence_arr = fence_arr;
2999 mcs_data.arr_len = seq_arr_len;
3003 /* wait (with timeout) for the first CS to be completed */
3004 mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
3005 mcs_compl = hl_wait_multi_cs_completion_init(hdev);
3006 if (IS_ERR(mcs_compl)) {
3007 rc = PTR_ERR(mcs_compl);
3011 /* poll all CS fences, extract timestamp */
3012 mcs_data.update_ts = true;
3013 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3015 * skip wait for CS completion when one of the below is true:
3016 * - an error on the poll function
3017 * - one or more CS in the list completed
3018 * - the user called ioctl with timeout 0
3020 if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
3021 goto completion_fini;
3024 rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
3025 if (rc || (mcs_data.wait_status == 0))
3029 * poll fences once again to update the CS map.
3030 * no timestamp should be updated this time.
3032 mcs_data.update_ts = false;
3033 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3035 if (rc || mcs_data.completion_bitmap)
3039 * if hl_wait_multi_cs_completion returned before timeout (i.e.
3040 * it got a completion) it either got completed by CS in the multi CS list
3041 * (in which case the indication will be non empty completion_bitmap) or it
3042 * got completed by CS submitted to one of the shared stream master but
3043 * not in the multi CS list (in which case we should wait again but modify
3044 * the timeout and set timestamp as zero to let a CS related to the current
3045 * multi-CS set a new, relevant, timestamp)
3047 mcs_data.timeout_jiffies = mcs_data.wait_status;
3048 mcs_compl->timestamp = 0;
3052 hl_wait_multi_cs_completion_fini(mcs_compl);
3061 if (rc == -ERESTARTSYS) {
3062 dev_err_ratelimited(hdev->dev,
3063 "user process got signal while waiting for Multi-CS\n");
3070 /* update output args */
3071 memset(args, 0, sizeof(*args));
3073 if (mcs_data.completion_bitmap) {
3074 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3075 args->out.cs_completion_map = mcs_data.completion_bitmap;
		/* if timestamp is not 0, it's valid */
3078 if (mcs_data.timestamp) {
3079 args->out.timestamp_nsec = mcs_data.timestamp;
3080 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3083 /* update if some CS was gone */
3084 if (!mcs_data.timestamp)
3085 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3087 args->out.status = HL_WAIT_CS_STATUS_BUSY;
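/*
 * hl_cs_wait_ioctl() - implementation of the single-CS wait ioctl.
 *
 * Waits for the CS with the user-supplied sequence number and translates the
 * internal wait status into the uapi status, flags and timestamp that are
 * reported back to the user.
 */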
3093 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3095 struct hl_device *hdev = hpriv->hdev;
3096 union hl_wait_cs_args *args = data;
3097 enum hl_cs_wait_status status;
3098 u64 seq = args->in.seq;
	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
3104 if (rc == -ERESTARTSYS) {
3105 dev_err_ratelimited(hdev->dev,
3106 "user process got signal while waiting for CS handle %llu\n",
3111 memset(args, 0, sizeof(*args));
3114 if (rc == -ETIMEDOUT) {
3115 dev_err_ratelimited(hdev->dev,
3116 "CS %llu has timed-out while user process is waiting for it\n",
3118 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
3119 } else if (rc == -EIO) {
3120 dev_err_ratelimited(hdev->dev,
3121 "CS %llu has been aborted while user process is waiting for it\n",
3123 args->out.status = HL_WAIT_CS_STATUS_ABORTED;
3129 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3130 args->out.timestamp_nsec = timestamp;
3134 case CS_WAIT_STATUS_GONE:
3135 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3137 case CS_WAIT_STATUS_COMPLETED:
3138 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3140 case CS_WAIT_STATUS_BUSY:
3142 args->out.status = HL_WAIT_CS_STATUS_BUSY;
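/*
 * ts_buff_get_kernel_ts_record() - get a timestamp registration node.
 *
 * Returns (in @pend) the pending-interrupt record that corresponds to
 * @ts_offset inside the timestamp buffer. If the record is still in use by a
 * previous registration that has not reached its target value, it is first
 * unlinked from the interrupt wait list (or, if the irq handler is in the
 * middle of handling it, we retry until it is released or MAX_TS_ITER_NUM is
 * reached). The record is then filled with the new CQ buffer, offset and
 * target value.
 */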
3149 static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
3150 struct hl_cb *cq_cb,
3151 u64 ts_offset, u64 cq_offset, u64 target_value,
3152 spinlock_t *wait_list_lock,
3153 struct hl_user_pending_interrupt **pend)
3155 struct hl_ts_buff *ts_buff = buf->private;
3156 struct hl_user_pending_interrupt *requested_offset_record =
3157 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3159 struct hl_user_pending_interrupt *cb_last =
3160 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3161 (ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
3162 unsigned long flags, iter_counter = 0;
3163 u64 current_cq_counter;
	/* Validate that ts_offset does not exceed the last record in the buffer */
3167 if (requested_offset_record >= cb_last) {
3168 dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
3169 (u64)(uintptr_t)cb_last);
3173 timestamp = ktime_get();
3176 spin_lock_irqsave(wait_list_lock, flags);
	/* Unregister only if we didn't reach the target value,
	 * since in this case there will be no handling in irq context
	 * and then it's safe to delete the node from the interrupt list
	 * and re-use it for another interrupt
	 */
3183 if (requested_offset_record->ts_reg_info.in_use) {
3184 current_cq_counter = *requested_offset_record->cq_kernel_addr;
3185 if (current_cq_counter < requested_offset_record->cq_target_value) {
3186 list_del(&requested_offset_record->wait_list_node);
3187 spin_unlock_irqrestore(wait_list_lock, flags);
3189 hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
3190 hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
3192 dev_dbg(buf->mmg->dev,
3193 "ts node removed from interrupt list now can re-use\n");
3195 dev_dbg(buf->mmg->dev,
3196 "ts node in middle of irq handling\n");
			/* irq handling is in the middle, give it time to finish */
3199 spin_unlock_irqrestore(wait_list_lock, flags);
3200 usleep_range(100, 1000);
3201 if (++iter_counter == MAX_TS_ITER_NUM) {
3202 dev_err(buf->mmg->dev,
3203 "Timestamp offset processing reached timeout of %lld ms\n",
3204 ktime_ms_delta(ktime_get(), timestamp));
3211 /* Fill up the new registration node info */
3212 requested_offset_record->ts_reg_info.buf = buf;
3213 requested_offset_record->ts_reg_info.cq_cb = cq_cb;
3214 requested_offset_record->ts_reg_info.timestamp_kernel_addr =
3215 (u64 *) ts_buff->user_buff_address + ts_offset;
3216 requested_offset_record->cq_kernel_addr =
3217 (u64 *) cq_cb->kernel_address + cq_offset;
3218 requested_offset_record->cq_target_value = target_value;
3220 spin_unlock_irqrestore(wait_list_lock, flags);
3223 *pend = requested_offset_record;
3225 dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
3226 requested_offset_record);
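/*
 * _hl_interrupt_wait_ioctl() - wait on a user interrupt using a CQ counters buffer.
 *
 * The completion condition is a CQ counter (inside @cq_counters_handle at
 * @cq_counters_offset) reaching @target_value. When @register_ts_record is
 * set, the call only registers a timestamp record and returns immediately;
 * the interrupt handler later fills in the timestamp once the target value
 * is reached. Otherwise the call blocks on the pending node's fence until
 * completion, timeout or a signal.
 */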
3230 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3231 struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
3232 u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
3233 u64 target_value, struct hl_user_interrupt *interrupt,
3234 bool register_ts_record, u64 ts_handle, u64 ts_offset,
3235 u32 *status, u64 *timestamp)
3237 struct hl_user_pending_interrupt *pend;
3238 struct hl_mmap_mem_buf *buf;
3239 struct hl_cb *cq_cb;
3240 unsigned long timeout, flags;
3244 timeout = hl_usecs64_to_jiffies(timeout_us);
3248 cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
3254 /* Validate the cq offset */
3255 if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
3256 ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
3261 if (register_ts_record) {
3262 dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
3263 interrupt->interrupt_id, ts_offset, cq_counters_offset);
3264 buf = hl_mmap_mem_buf_get(mmg, ts_handle);
3270 /* get ts buffer record */
3271 rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
3272 cq_counters_offset, target_value,
3273 &interrupt->wait_list_lock, &pend);
3277 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3282 hl_fence_init(&pend->fence, ULONG_MAX);
3283 pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset;
3284 pend->cq_target_value = target_value;
3287 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3289 /* We check for completion value as interrupt could have been received
3290 * before we added the node to the wait list
3292 if (*pend->cq_kernel_addr >= target_value) {
3293 if (register_ts_record)
3294 pend->ts_reg_info.in_use = 0;
3295 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3297 *status = HL_WAIT_CS_STATUS_COMPLETED;
3299 if (register_ts_record) {
3300 *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
3303 pend->fence.timestamp = ktime_get();
3306 } else if (!timeout_us) {
3307 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3308 *status = HL_WAIT_CS_STATUS_BUSY;
3309 pend->fence.timestamp = ktime_get();
3313 /* Add pending user interrupt to relevant list for the interrupt
3314 * handler to monitor.
	 * Note that we cannot keep the list sorted by target value
	 * in order to shorten the list traversal, since
	 * the same list could have nodes for different cq counter handles.
	 *
	 * Mark the ts buff offset as in-use here, inside the spinlock protection area,
	 * to avoid getting into the re-use section in ts_buff_get_kernel_ts_record
	 * before adding the node to the list. This scenario might happen when
	 * multiple threads are racing on the same offset: one thread could
	 * set the ts buff in ts_buff_get_kernel_ts_record, then the other thread
	 * takes over, gets to ts_buff_get_kernel_ts_record, and then we will try
	 * to re-use the same ts buff offset and will try to delete a non-existing
	 * node from the list.
	 */
3328 if (register_ts_record)
3329 pend->ts_reg_info.in_use = 1;
3331 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3332 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3334 if (register_ts_record) {
3335 rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
3336 goto ts_registration_exit;
3339 /* Wait for interrupt handler to signal completion */
	completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
								timeout);
3342 if (completion_rc > 0) {
3343 *status = HL_WAIT_CS_STATUS_COMPLETED;
3345 if (completion_rc == -ERESTARTSYS) {
3346 dev_err_ratelimited(hdev->dev,
3347 "user process got signal while waiting for interrupt ID %d\n",
3348 interrupt->interrupt_id);
3350 *status = HL_WAIT_CS_STATUS_ABORTED;
3352 if (pend->fence.error == -EIO) {
3353 dev_err_ratelimited(hdev->dev,
3354 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3357 *status = HL_WAIT_CS_STATUS_ABORTED;
3359 /* The wait has timed-out. We don't know anything beyond that
3360 * because the workload wasn't submitted through the driver.
		 * Therefore, from driver's perspective, the workload is still
		 * executing.
		 */
3365 *status = HL_WAIT_CS_STATUS_BUSY;
	 * We keep removing the node from the list here, and not at the irq handler,
	 * for the completion timeout case. And if it's a registration
	 * for a ts record, the node will be deleted in the irq handler after
3374 * we reach the target value.
3376 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3377 list_del(&pend->wait_list_node);
3378 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3381 *timestamp = ktime_to_ns(pend->fence.timestamp);
3384 ts_registration_exit:
3390 hl_mmap_mem_buf_put(buf);
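/*
 * _hl_interrupt_wait_ioctl_user_addr() - wait on a user interrupt using a user address.
 *
 * Same idea as _hl_interrupt_wait_ioctl(), but the completion condition is a
 * 64 bit value at @user_address reaching @target_value. The value is re-read
 * from user memory after every interrupt, and the wait is re-armed with the
 * remaining timeout if the target has not been reached yet.
 */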
3399 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
3400 u64 timeout_us, u64 user_address,
3401 u64 target_value, struct hl_user_interrupt *interrupt,
3405 struct hl_user_pending_interrupt *pend;
3406 unsigned long timeout, flags;
3407 u64 completion_value;
3411 timeout = hl_usecs64_to_jiffies(timeout_us);
3415 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3421 hl_fence_init(&pend->fence, ULONG_MAX);
3423 /* Add pending user interrupt to relevant list for the interrupt
3424 * handler to monitor
3426 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3427 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3428 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3430 /* We check for completion value as interrupt could have been received
3431 * before we added the node to the wait list
3433 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3434 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3436 goto remove_pending_user_interrupt;
3439 if (completion_value >= target_value) {
3440 *status = HL_WAIT_CS_STATUS_COMPLETED;
3441 /* There was no interrupt, we assume the completion is now. */
3442 pend->fence.timestamp = ktime_get();
3444 *status = HL_WAIT_CS_STATUS_BUSY;
3447 if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
3448 goto remove_pending_user_interrupt;
3451 /* Wait for interrupt handler to signal completion */
	completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
								timeout);
3455 /* If timeout did not expire we need to perform the comparison.
3456 * If comparison fails, keep waiting until timeout expires
3458 if (completion_rc > 0) {
3459 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3460 /* reinit_completion must be called before we check for user
3461 * completion value, otherwise, if interrupt is received after
3462 * the comparison and before the next wait_for_completion,
3463 * we will reach timeout and fail
3465 reinit_completion(&pend->fence.completion);
3466 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3468 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3469 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3472 goto remove_pending_user_interrupt;
3475 if (completion_value >= target_value) {
3476 *status = HL_WAIT_CS_STATUS_COMPLETED;
3477 } else if (pend->fence.error) {
3478 dev_err_ratelimited(hdev->dev,
3479 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3481 /* set the command completion status as ABORTED */
3482 *status = HL_WAIT_CS_STATUS_ABORTED;
3484 timeout = completion_rc;
3487 } else if (completion_rc == -ERESTARTSYS) {
3488 dev_err_ratelimited(hdev->dev,
3489 "user process got signal while waiting for interrupt ID %d\n",
3490 interrupt->interrupt_id);
3493 /* The wait has timed-out. We don't know anything beyond that
3494 * because the workload wasn't submitted through the driver.
		 * Therefore, from driver's perspective, the workload is still
		 * executing.
		 */
3499 *status = HL_WAIT_CS_STATUS_BUSY;
3502 remove_pending_user_interrupt:
3503 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3504 list_del(&pend->wait_list_node);
3505 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3507 *timestamp = ktime_to_ns(pend->fence.timestamp);
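/*
 * hl_interrupt_wait_ioctl() - dispatch an interrupt wait request.
 *
 * Decodes the interrupt ID from the ioctl flags and maps it to the matching
 * hl_user_interrupt object: decoder interrupts, regular user interrupts, or
 * the common user-CQ / decoder interrupts. Depending on the flags, the wait
 * is then performed either on kernel-allocated CQ counters or on a
 * user-supplied address.
 */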
3515 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3517 u16 interrupt_id, first_interrupt, last_interrupt;
3518 struct hl_device *hdev = hpriv->hdev;
3519 struct asic_fixed_properties *prop;
3520 struct hl_user_interrupt *interrupt;
3521 union hl_wait_cs_args *args = data;
3522 u32 status = HL_WAIT_CS_STATUS_BUSY;
3526 prop = &hdev->asic_prop;
3528 if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
3529 dev_err(hdev->dev, "no user interrupts allowed");
3533 interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
3535 first_interrupt = prop->first_available_user_interrupt;
3536 last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
3538 if (interrupt_id < prop->user_dec_intr_count) {
3540 /* Check if the requested core is enabled */
3541 if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
3542 dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
3547 interrupt = &hdev->user_interrupt[interrupt_id];
3549 } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
3551 int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
3552 interrupt = &hdev->user_interrupt[int_idx];
3554 } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
3555 interrupt = &hdev->common_user_cq_interrupt;
3556 } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
3557 interrupt = &hdev->common_decoder_interrupt;
3559 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
3563 if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
3564 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
3565 args->in.interrupt_timeout_us, args->in.cq_counters_handle,
3566 args->in.cq_counters_offset,
3567 args->in.target, interrupt,
3568 !!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT),
3569 args->in.timestamp_handle, args->in.timestamp_offset,
						&status, &timestamp);
3572 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
3573 args->in.interrupt_timeout_us, args->in.addr,
				args->in.target, interrupt, &status,
				&timestamp);
3579 memset(args, 0, sizeof(*args));
3580 args->out.status = status;
3583 args->out.timestamp_nsec = timestamp;
3584 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
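/*
 * hl_wait_ioctl() - main entry point of the wait ioctl.
 *
 * Bails out if the device is not operational or the reset watchdog is active,
 * and otherwise dispatches to the interrupt wait, multi-CS wait or single-CS
 * wait handler according to the flags.
 */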
3590 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3592 struct hl_device *hdev = hpriv->hdev;
3593 union hl_wait_cs_args *args = data;
3594 u32 flags = args->in.flags;
3597 /* If the device is not operational, or if an error has happened and user should release the
3598 * device, there is no point in waiting for any command submission or user interrupt.
3600 if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
3603 if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
3604 rc = hl_interrupt_wait_ioctl(hpriv, data);
3605 else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
3606 rc = hl_multi_cs_wait_ioctl(hpriv, data);
3608 rc = hl_cs_wait_ioctl(hpriv, data);