// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_CS_FLAGS_SIG_WAIT	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT)
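/*
 * Note: HL_CS_FLAGS_SIG_WAIT is just a convenience mask combining the two
 * sync-stream flags. hl_cs_ioctl() uses it to reject a CS that sets both
 * SIGNAL and WAIT, as those CS types are mutually exclusive.
 */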
static void job_wq_completion(struct work_struct *work);
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq);
static void cs_do_release(struct kref *ref);
static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	hdev->asic_funcs->reset_sob(hdev, hw_sob);
}
void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
		hw_sob->q_idx, hw_sob->sob_id);
}
static const char *hl_fence_get_driver_name(struct dma_fence *fence)
{
	return "HabanaLabs";
}
static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
{
	struct hl_cs_compl *hl_cs_compl =
		container_of(fence, struct hl_cs_compl, base_fence);

	return dev_name(hl_cs_compl->hdev->dev);
}
static bool hl_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}
static void hl_fence_release(struct dma_fence *fence)
{
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);
	struct hl_device *hdev = hl_cs_cmpl->hdev;

	/* EBUSY means the CS was never submitted and hence we don't have
	 * an attached hw_sob object that we should handle here
	 */
	if (fence->error == -EBUSY)
		goto free;

	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
			(hl_cs_cmpl->type == CS_TYPE_WAIT)) {

		dev_dbg(hdev->dev,
			"CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
			hl_cs_cmpl->cs_seq,
			hl_cs_cmpl->type,
			hl_cs_cmpl->hw_sob->sob_id,
			hl_cs_cmpl->sob_val);

		/*
		 * A signal CS can get completion while the corresponding wait
		 * for signal CS is on its way to the PQ. The wait for signal CS
		 * will get stuck if the signal CS incremented the SOB to its
		 * max value and there are no pending (submitted) waits on this
		 * SOB.
		 * We do the following to avoid this situation:
		 * 1. The wait for signal CS must get a ref for the signal CS as
		 *    soon as possible in cs_ioctl_signal_wait() and put it
		 *    before being submitted to the PQ but after it incremented
		 *    the SOB refcnt in init_signal_wait_cs().
		 * 2. Signal/Wait for signal CS will decrement the SOB refcnt
		 *    here.
		 * These two measures guarantee that the wait for signal CS will
		 * reset the SOB upon completion rather than the signal CS and
		 * hence the above scenario is avoided.
		 */
		kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
	}

free:
	kfree_rcu(hl_cs_cmpl, base_fence.rcu);
}
static const struct dma_fence_ops hl_fence_ops = {
	.get_driver_name = hl_fence_get_driver_name,
	.get_timeline_name = hl_fence_get_timeline_name,
	.enable_signaling = hl_fence_enable_signaling,
	.release = hl_fence_release
};
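/*
 * These ops back the dma_fence that is embedded in struct hl_cs_compl. The
 * fence is initialized in allocate_cs() via dma_fence_init(), signaled and
 * put in cs_do_release(), and finally freed (RCU-deferred) in
 * hl_fence_release() above.
 */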
static void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}
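/*
 * CS refcount life-cycle: the submitting thread holds one reference for the
 * duration of the ioctl, and every job on an external or H/W queue takes an
 * additional reference (see cs_ioctl_default()/cs_ioctl_signal_wait()). When
 * the last reference is dropped, cs_do_release() frees the remaining jobs,
 * updates the fence and releases the CS object.
 */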
static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/*
	 * Patched CB is created for external queues jobs, and for H/W queues
	 * jobs if the user CB was allocated by driver and MMU is disabled.
	 */
	return (job->queue_type == QUEUE_TYPE_EXT ||
			(job->queue_type == QUEUE_TYPE_HW &&
				job->is_kernel_allocated_cb &&
				!hdev->mmu_enable));
}
/*
 * cs_parser - parse the user command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @job	: pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;
			job->contains_dma_pkt = parser.contains_dma_pkt;

			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt++;
			spin_unlock(&job->patched_cb->lock);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		spin_lock(&job->user_cb->lock);
		job->user_cb->cs_cnt--;
		spin_unlock(&job->user_cb->lock);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}
static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt--;
			spin_unlock(&job->patched_cb->lock);

			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here.
	 */
	if (job->queue_type == QUEUE_TYPE_HW &&
			job->is_kernel_allocated_cb && hdev->mmu_enable) {
		spin_lock(&job->user_cb->lock);
		job->user_cb->cs_cnt--;
		spin_unlock(&job->user_cb->lock);

		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	if (job->queue_type == QUEUE_TYPE_EXT ||
			job->queue_type == QUEUE_TYPE_HW)
		cs_put(cs);

	kfree(job);
}
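/*
 * free_job() is reached from two paths: job_wq_completion() below, for jobs
 * on external queues whose completion arrives through the completion queue
 * workqueue, and cs_do_release()/cs_rollback(), which sweep whatever is left
 * on cs->job_list.
 */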
static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
{
	hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
			ctx->cs_counters.device_in_reset_drop_cnt;
	hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
			ctx->cs_counters.out_of_mem_drop_cnt;
	hdev->aggregated_cs_counters.parsing_drop_cnt +=
			ctx->cs_counters.parsing_drop_cnt;
	hdev->aggregated_cs_counters.queue_full_drop_cnt +=
			ctx->cs_counters.queue_full_drop_cnt;
}
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs,
						refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;

	cs->completed = true;

	/*
	 * Although if we reached here it means that all external jobs have
	 * finished, because each one of them took refcnt to CS, we still
	 * need to go over the internal jobs and free them. Otherwise, we
	 * will have leaked memory and what's worse, the CS object (and
	 * potentially the CTX object) could be released, while the JOB
	 * still holds a pointer to them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);

	/* We also need to update CI for internal queues */
	if (cs->submitted) {
		hdev->asic_funcs->hw_queues_lock(hdev);

		hdev->cs_active_cnt--;
		if (!hdev->cs_active_cnt) {
			struct hl_device_idle_busy_ts *ts;

			ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
			ts->busy_to_idle_ts = ktime_get();

			if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
				hdev->idle_busy_ts_idx = 0;
		} else if (hdev->cs_active_cnt < 0) {
			dev_crit(hdev->dev, "CS active cnt %d is negative\n",
				hdev->cs_active_cnt);
		}

		hdev->asic_funcs->hw_queues_unlock(hdev);

		hl_int_hw_queue_update_ci(cs);

		spin_lock(&hdev->hw_queues_mirror_lock);
		/* remove CS from hw_queues mirror list */
		list_del_init(&cs->mirror_node);
		spin_unlock(&hdev->hw_queues_mirror_lock);

		/*
		 * Don't cancel TDR in case this CS was timedout because we
		 * might be running from the TDR context
		 */
		if ((!cs->timedout) &&
			(hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
			struct hl_cs *next;

			if (cs->tdr_active)
				cancel_delayed_work_sync(&cs->work_tdr);

			spin_lock(&hdev->hw_queues_mirror_lock);

			/* queue TDR for next CS */
			next = list_first_entry_or_null(
					&hdev->hw_queues_mirror_list,
					struct hl_cs, mirror_node);

			if ((next) && (!next->tdr_active)) {
				next->tdr_active = true;
				schedule_delayed_work(&next->work_tdr,
							hdev->timeout_jiffies);
			}

			spin_unlock(&hdev->hw_queues_mirror_lock);
		}
	} else if (cs->type == CS_TYPE_WAIT) {
		/*
		 * In case the wait for signal CS was submitted, the put occurs
		 * in init_signal_wait_cs() right before hanging on the PQ.
		 */
		dma_fence_put(cs->signal_fence);
	}

	/*
	 * Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	/* We need to mark an error for not submitted because in that case
	 * the dma fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
		dma_fence_set_error(cs->fence, -ETIMEDOUT);
	else if (cs->aborted)
		dma_fence_set_error(cs->fence, -EIO);
	else if (!cs->submitted)
		dma_fence_set_error(cs->fence, -EBUSY);

	dma_fence_signal(cs->fence);
	dma_fence_put(cs->fence);

	cs_counters_aggregate(hdev, cs->ctx);

	kfree(cs->jobs_in_queue_cnt);
	kfree(cs);
}
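/*
 * The error set on the fence above doubles as state for the rest of the
 * driver: -ETIMEDOUT and -EIO are reported back to the waiter in
 * _hl_cs_wait_ioctl(), while -EBUSY marks a CS that was never submitted so
 * hl_fence_release() knows there is no hw_sob to put.
 */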
static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int ctx_asid, rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						work_tdr.work);
	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark the CS is timed out so we won't try to cancel its TDR */
	cs->timedout = true;

	hdev = cs->ctx->hdev;
	ctx_asid = cs->ctx->asid;

	dev_err(hdev->dev,
		"Command submission %llu has not finished in time!\n",
		cs->sequence);

	cs_put(cs);

	if (hdev->reset_on_lockup)
		hl_device_reset(hdev, false, false);
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, struct hl_cs **cs_new)
{
	struct hl_cs_compl *cs_cmpl;
	struct dma_fence *other = NULL;
	struct hl_cs *cs;
	int rc;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		return -ENOMEM;

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	cs->type = cs_type;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl) {
		rc = -ENOMEM;
		goto free_cs;
	}

	cs_cmpl->hdev = hdev;
	cs_cmpl->type = cs->type;
	spin_lock_init(&cs_cmpl->lock);
	cs->fence = &cs_cmpl->base_fence;

	spin_lock(&ctx->cs_lock);

	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];
	if ((other) && (!dma_fence_is_signaled(other))) {
		dev_dbg(hdev->dev,
			"Rejecting CS because of too many in-flight CS\n");
		rc = -EAGAIN;
		goto free_fence;
	}

	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
	if (!cs->jobs_in_queue_cnt) {
		rc = -ENOMEM;
		goto free_fence;
	}

	dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
			ctx->asid, ctx->cs_sequence);

	cs->sequence = cs_cmpl->cs_seq;

	ctx->cs_pending[cs_cmpl->cs_seq &
			(hdev->asic_prop.max_pending_cs - 1)] =
							&cs_cmpl->base_fence;
	ctx->cs_sequence++;

	dma_fence_get(&cs_cmpl->base_fence);

	dma_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	spin_unlock(&ctx->cs_lock);
	kfree(cs_cmpl);
free_cs:
	kfree(cs);
	return rc;
}
static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);
}
void hl_cs_rollback_all(struct hl_device *hdev)
{
	int i;
	struct hl_cs *cs, *tmp;

	/* flush all completions */
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		flush_workqueue(hdev->cq_wq[i]);

	/* Make sure we don't have leftovers in the H/W queues mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
				mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}
}
static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	free_job(hdev, job);
}
static int validate_queue_index(struct hl_device *hdev,
				struct hl_cs_chunk *chunk,
				enum hl_queue_type *queue_type,
				bool *is_kernel_allocated_cb)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;

	/* This must be checked here to prevent out-of-bounds access to
	 * hw_queues_props array
	 */
	if (chunk->queue_index >= asic->max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->driver_only) {
		dev_err(hdev->dev,
			"Queue index %d is restricted for the kernel driver\n",
			chunk->queue_index);
		return -EINVAL;
	}

	*queue_type = hw_queue_prop->type;
	*is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;

	return 0;
}
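/*
 * The two values returned here decide how the chunk's cb_handle is treated:
 * when is_kernel_allocated_cb is set, cs_ioctl_default() resolves the handle
 * through get_cb_from_cs_chunk() below; otherwise the handle is passed to
 * the ASIC parser as an opaque address.
 */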
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
					struct hl_cb_mgr *cb_mgr,
					struct hl_cs_chunk *chunk)
{
	struct hl_cb *cb;
	u32 cb_handle;

	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	spin_lock(&cb->lock);
	cb->cs_cnt++;
	spin_unlock(&cb->lock);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}
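/*
 * On success the CB is returned with cb->cs_cnt incremented (under cb->lock)
 * and with the reference taken by hl_cb_get() still held. Both are dropped
 * later, either in cs_parser() for patched CBs or in free_job().
 */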
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		return NULL;

	job->queue_type = queue_type;
	job->is_kernel_allocated_cb = is_kernel_allocated_cb;

	if (is_cb_patched(hdev, job))
		INIT_LIST_HEAD(&job->userptr_list);

	if (job->queue_type == QUEUE_TYPE_EXT)
		INIT_WORK(&job->finish_work, job_wq_completion);

	return job;
}
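/*
 * Callers are expected to fill in the rest of the job (id, cs, user_cb,
 * user_cb_size, hw_queue_id) and link it to cs->job_list; see
 * cs_ioctl_default() and cs_ioctl_signal_wait() below.
 */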
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
				u32 num_chunks, u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	bool int_queues_only = true;
	u32 size_to_copy;
	int rc, i;

	*cs_seq = ULLONG_MAX;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		rc = -EINVAL;
		goto out;
	}

	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
					GFP_ATOMIC);
	if (!cs_chunk_array) {
		rc = -ENOMEM;
		goto out;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		rc = -EFAULT;
		goto free_cs_chunk_array;
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, hpriv->ctx);

	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
	if (rc) {
		hl_ctx_put(hpriv->ctx);
		goto free_cs_chunk_array;
	}

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
				&is_kernel_allocated_cb);
		if (rc) {
			hpriv->ctx->cs_counters.parsing_drop_cnt++;
			goto free_cs_object;
		}

		if (is_kernel_allocated_cb) {
			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
			if (!cb) {
				hpriv->ctx->cs_counters.parsing_drop_cnt++;
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
			int_queues_only = false;

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;
			else
				goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to user and free all its resources
		 * Only increment for JOB on external or H/W queues, because
		 * only for those JOBs we get completion
		 */
		if (job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW)
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			hpriv->ctx->cs_counters.parsing_drop_cnt++;
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	if (int_queues_only) {
		hpriv->ctx->cs_counters.parsing_drop_cnt++;
		dev_err(hdev->dev,
			"Reject CS %d.%llu because only internal queues jobs are present\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	spin_lock(&cb->lock);
	cb->cs_cnt--;
	spin_unlock(&cb->lock);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}
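/*
 * For reference, a rough user-space sketch of this path (illustrative only,
 * based on the chunk/args fields consumed above; error handling omitted):
 *
 *	struct hl_cs_chunk chunk = {
 *		.cb_handle = cb_handle,       (handle from the CB-create ioctl)
 *		.queue_index = queue_index,   (an external or H/W queue)
 *		.cb_size = cb_size,           (at least 8, at most the CB size)
 *	};
 *	struct hl_cs_in in = {
 *		.chunks_execute = (__u64) (uintptr_t) &chunk,
 *		.num_chunks_execute = 1,
 *	};
 */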
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
				void __user *chunks, u32 num_chunks,
				u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_chunk *cs_chunk_array, *chunk;
	struct hw_queue_properties *hw_queue_prop;
	struct dma_fence *sig_fence = NULL;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	enum hl_queue_type q_type;
	u64 *signal_seq_arr = NULL, signal_seq;
	u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
	int rc;

	*cs_seq = ULLONG_MAX;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		rc = -EINVAL;
		goto out;
	}

	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
					GFP_ATOMIC);
	if (!cs_chunk_array) {
		rc = -ENOMEM;
		goto out;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		rc = -EFAULT;
		goto free_cs_chunk_array;
	}

	/* currently it is guaranteed to have only one chunk */
	chunk = &cs_chunk_array[0];
	q_idx = chunk->queue_index;

	/* Validate the queue index before its properties are dereferenced */
	if (q_idx >= hdev->asic_prop.max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
	q_type = hw_queue_prop->type;

	if (!hw_queue_prop->supports_sync_stream) {
		dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	if (cs_type == CS_TYPE_WAIT) {
		struct hl_cs_compl *sig_waitcs_cmpl;

		signal_seq_arr_len = chunk->num_signal_seq_arr;

		/* currently only one signal seq is supported */
		if (signal_seq_arr_len != 1) {
			dev_err(hdev->dev,
				"Wait for signal CS supports only one signal CS seq\n");
			rc = -EINVAL;
			goto free_cs_chunk_array;
		}

		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
						sizeof(*signal_seq_arr),
						GFP_ATOMIC);
		if (!signal_seq_arr) {
			rc = -ENOMEM;
			goto free_cs_chunk_array;
		}

		size_to_copy = chunk->num_signal_seq_arr *
				sizeof(*signal_seq_arr);
		if (copy_from_user(signal_seq_arr,
					u64_to_user_ptr(chunk->signal_seq_arr),
					size_to_copy)) {
			dev_err(hdev->dev,
				"Failed to copy signal seq array from user\n");
			rc = -EFAULT;
			goto free_signal_seq_array;
		}

		/* currently it is guaranteed to have only one signal seq */
		signal_seq = signal_seq_arr[0];
		sig_fence = hl_ctx_get_fence(ctx, signal_seq);
		if (IS_ERR(sig_fence)) {
			dev_err(hdev->dev,
				"Failed to get signal CS with seq 0x%llx\n",
				signal_seq);
			rc = PTR_ERR(sig_fence);
			goto free_signal_seq_array;
		}

		if (!sig_fence) {
			/* signal CS already finished */
			rc = 0;
			goto free_signal_seq_array;
		}

		sig_waitcs_cmpl =
			container_of(sig_fence, struct hl_cs_compl, base_fence);

		if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
			dev_err(hdev->dev,
				"CS seq 0x%llx is not of a signal CS\n",
				signal_seq);
			dma_fence_put(sig_fence);
			rc = -EINVAL;
			goto free_signal_seq_array;
		}

		if (dma_fence_is_signaled(sig_fence)) {
			/* signal CS already finished */
			dma_fence_put(sig_fence);
			rc = 0;
			goto free_signal_seq_array;
		}
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, ctx);

	rc = allocate_cs(hdev, ctx, cs_type, &cs);
	if (rc) {
		if (cs_type == CS_TYPE_WAIT)
			dma_fence_put(sig_fence);
		hl_ctx_put(ctx);
		goto free_signal_seq_array;
	}

	/*
	 * Save the signal CS fence for later initialization right before
	 * hanging the wait CS on the queue.
	 */
	if (cs->type == CS_TYPE_WAIT)
		cs->signal_fence = sig_fence;

	hl_debugfs_add_cs(cs);

	*cs_seq = cs->sequence;

	job = hl_cs_allocate_job(hdev, q_type, true);
	if (!job) {
		ctx->cs_counters.out_of_mem_drop_cnt++;
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto put_cs;
	}

	cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
	if (!cb) {
		ctx->cs_counters.out_of_mem_drop_cnt++;
		kfree(job);
		rc = -EFAULT;
		goto put_cs;
	}

	if (cs->type == CS_TYPE_WAIT)
		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
	else
		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);

	job->id = 0;
	job->cs = cs;
	job->user_cb = cb;
	job->user_cb->cs_cnt++;
	job->user_cb_size = cb_size;
	job->hw_queue_id = q_idx;

	/*
	 * No need for parsing, the user CB is the patched CB.
	 * We call hl_cb_destroy() out of two reasons - we don't need the CB in
	 * the CB idr anymore and to decrement its refcount as it was
	 * incremented inside hl_cb_kernel_create().
	 */
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size;
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	cs->jobs_in_queue_cnt[job->hw_queue_id]++;

	list_add_tail(&job->cs_node, &cs->job_list);

	/* increment refcount as for external queues we get completion */
	cs_get(cs);

	hl_debugfs_add_job(hdev, job);

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_signal_seq_array:
	if (cs_type == CS_TYPE_WAIT)
		kfree(signal_seq_arr);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_cs_args *args = data;
	struct hl_ctx *ctx = hpriv->ctx;
	void __user *chunks_execute, *chunks_restore;
	enum hl_cs_type cs_type;
	u32 num_chunks_execute, num_chunks_restore, sig_wait_flags;
	u64 cs_seq = ULLONG_MAX;
	int rc, do_ctx_switch;
	bool need_soft_reset = false;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		rc = -EBUSY;
		goto out;
	}

	sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT;

	if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) {
		dev_err(hdev->dev,
			"Signal and wait CS flags are mutually exclusive, context %d\n",
			ctx->asid);
		rc = -EINVAL;
		goto out;
	}

	if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) &&
			(!hdev->supports_sync_stream))) {
		dev_err(hdev->dev, "Sync stream CS is not supported\n");
		rc = -EINVAL;
		goto out;
	}

	if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL)
		cs_type = CS_TYPE_SIGNAL;
	else if (args->in.cs_flags & HL_CS_FLAGS_WAIT)
		cs_type = CS_TYPE_WAIT;
	else
		cs_type = CS_TYPE_DEFAULT;

	chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
	num_chunks_execute = args->in.num_chunks_execute;

	if (cs_type == CS_TYPE_DEFAULT) {
		if (!num_chunks_execute) {
			dev_err(hdev->dev,
				"Got execute CS with 0 chunks, context %d\n",
				ctx->asid);
			rc = -EINVAL;
			goto out;
		}
	} else if (num_chunks_execute != 1) {
		dev_err(hdev->dev,
			"Sync stream CS mandates one chunk only, context %d\n",
			ctx->asid);
		rc = -EINVAL;
		goto out;
	}

	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		long ret;

		chunks_restore =
			(void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks_restore = args->in.num_chunks_restore;

		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timedout, or if the device is not IDLE
				 * while we want to do context-switch (-EBUSY),
				 * we need to soft-reset because QMAN is
				 * probably stuck. However, we can't call to
				 * reset here directly because of deadlock, so
				 * need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		if (!num_chunks_restore) {
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks_restore,
						num_chunks_restore, &cs_seq);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks_restore) {
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					cs_seq);
			if (ret <= 0) {
				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %ld\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		ctx->thread_ctx_switch_wait_token = 1;
	} else if (!ctx->thread_ctx_switch_wait_token) {
		u32 tmp;

		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

	if (cs_type == CS_TYPE_DEFAULT)
		rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute,
					&cs_seq);
	else
		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute,
						num_chunks_execute, &cs_seq);

out:
	if (rc != -EAGAIN) {
		memset(args, 0, sizeof(*args));
		args->out.status = rc;
		args->out.seq = cs_seq;
	}

	if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
		hl_device_reset(hdev, false, false);

	return rc;
}
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq)
{
	struct dma_fence *fence;
	unsigned long timeout;
	long rc;

	if (timeout_us == MAX_SCHEDULE_TIMEOUT)
		timeout = timeout_us;
	else
		timeout = usecs_to_jiffies(timeout_us);

	hl_ctx_get(hdev, ctx);

	fence = hl_ctx_get_fence(ctx, seq);
	if (IS_ERR(fence)) {
		rc = PTR_ERR(fence);
		if (rc == -EINVAL)
			dev_notice_ratelimited(hdev->dev,
				"Can't wait on CS %llu because current CS is at seq %llu\n",
				seq, ctx->cs_sequence);
	} else if (fence) {
		rc = dma_fence_wait_timeout(fence, true, timeout);
		if (fence->error == -ETIMEDOUT)
			rc = -ETIMEDOUT;
		else if (fence->error == -EIO)
			rc = -EIO;
		dma_fence_put(fence);
	} else {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);
		rc = 1;
	}

	hl_ctx_put(ctx);

	return rc;
}
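/*
 * Return convention: negative on error, 0 if the wait timed out before the
 * CS completed (reported as "busy" by hl_cs_wait_ioctl() below), positive
 * once the CS has completed (either the remaining timeout returned by
 * dma_fence_wait_timeout() or 1 when the fence is already gone). The
 * restore-phase wait in hl_cs_ioctl() relies on the same convention when it
 * checks for ret <= 0.
 */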
int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_wait_cs_args *args = data;
	u64 seq = args->in.seq;
	long rc;

	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);

	memset(args, 0, sizeof(*args));

	if (rc < 0) {
		if (rc == -ERESTARTSYS) {
			dev_err_ratelimited(hdev->dev,
				"user process got signal while waiting for CS handle %llu\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
			rc = -EINTR;
		} else if (rc == -ETIMEDOUT) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has timed-out while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
		} else if (rc == -EIO) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has been aborted while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
		}
		return rc;
	}

	if (rc == 0)
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
	else
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;

	return 0;
}