// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
/*
 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. It can go until twice the queue length.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
	ptr += val;
	ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
	return ptr;
}
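
/*
 * Illustrative note: letting pi/ci run over twice the queue length means a
 * full queue (pi == ci + HL_QUEUE_LENGTH) can be told apart from an empty
 * one (pi == ci) without sacrificing a slot.
 */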

static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
{
	return atomic_read(ci) & ((queue_len << 1) - 1);
}

static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
	int delta = (q->pi - queue_ci_get(&q->ci, queue_len));

	if (delta >= 0)
		return (queue_len - delta);
	else
		return (abs(delta) - queue_len);
}
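
/*
 * Worked example: with queue_len == 8, pi == 1 and ci == 12, delta is -11,
 * so the queue holds (-11 + 16) == 5 entries and abs(delta) - queue_len
 * correctly reports 3 free slots.
 */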

void hl_hw_queue_update_ci(struct hl_cs *cs)
{
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_hw_queue *q;
	int i;

	if (hdev->disabled)
		return;

	q = &hdev->kernel_queues[0];

	/* There are no internal queues if H/W queues are being used */
	if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
		return;

	/* We must increment CI for every queue that will never get a
	 * completion. There are two scenarios in which this can happen:
	 * 1. All queues of a non completion CS will never get a completion.
	 * 2. Internal queues never get a completion.
	 */
	for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
		if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
			atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
	}
}

/*
 * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external
 *                                or a H/W queue.
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device specific
 * function to set the pi (and doorbell)
 *
 * This function must be called when the scheduler mutex is taken
 */
static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
			struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
	struct hl_bd *bd;

	bd = q->kernel_address;
	bd += hl_pi_2_offset(q->pi);
	bd->ctl = cpu_to_le32(ctl);
	bd->len = cpu_to_le32(len);
	bd->ptr = cpu_to_le64(ptr);

	q->pi = hl_queue_inc_ptr(q->pi);
	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}

/*
 * ext_queue_sanity_checks - perform some sanity checks on external queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 * @reserve_cq_entry: whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
				struct hl_hw_queue *q, int num_of_entries,
				bool reserve_cq_entry)
{
	atomic_t *free_slots =
			&hdev->completion_queue[q->cq_id].free_slots_cnt;
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	if (reserve_cq_entry) {
		/*
		 * Check we have enough space in the completion queue.
		 * Subtract num_of_entries from the counter; if the counter
		 * goes negative, the CQ is full and we can't submit a new CB
		 * because we won't get an ack on its completion, so undo the
		 * subtraction and fail.
		 */
		if (atomic_add_negative(num_of_entries * -1, free_slots)) {
			dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
				num_of_entries, q->hw_queue_id);
			atomic_add(num_of_entries, free_slots);
			return -EAGAIN;
		}
	}

	return 0;
}
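
/*
 * Worked example: if the CQ counter holds 2 and num_of_entries is 3,
 * atomic_add_negative(-3, ...) drives it to -1 and returns true, so the
 * reservation is rejected and the counter is restored right away.
 */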

/*
 * int_queue_sanity_checks - perform some sanity checks on internal queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
					struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	if (num_of_entries > q->int_queue_len) {
		dev_err(hdev->dev,
			"Cannot populate queue %u with %u jobs\n",
			q->hw_queue_id, num_of_entries);
		return -ENOMEM;
	}

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, q->int_queue_len);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}

/*
 * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
 * @hdev: Pointer to hl_device structure.
 * @q: Pointer to hl_hw_queue structure.
 * @num_of_entries: How many entries to check for space.
 *
 * Notice: We do not reserve queue entries so this function mustn't be called
 *         more than once per CS for the same queue
 */
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}

/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue to send the CB to
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB that must NOT generate a completion entry.
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
				u32 cb_size, u64 cb_ptr)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
	int rc = 0;

	/*
	 * The CPU queue is a synchronous queue with an effective depth of
	 * a single entry (although it is allocated with room for multiple
	 * entries). Therefore, there is a different lock, called
	 * send_cpu_message_lock, that serializes accesses to the CPU queue.
	 * As a result, we don't need to lock the access to the entire H/W
	 * queues module when submitting a JOB to the CPU queue.
	 */
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_lock(hdev);

	if (hdev->disabled) {
		rc = -EPERM;
		goto out;
	}

	/*
	 * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue
	 * type only on init phase, when the queues are empty and being tested,
	 * so there is no need for sanity checks.
	 */
	if (q->queue_type != QUEUE_TYPE_HW) {
		rc = ext_queue_sanity_checks(hdev, q, 1, false);
		if (rc)
			goto out;
	}

	ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}
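
/*
 * Because this path bypasses ext_queue_schedule_job(), no completion-queue
 * packets are appended to the CB, which is why the submission does not
 * generate a completion entry.
 */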

/*
 * ext_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 */
static void ext_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_cq_entry cq_pkt;
	struct hl_cq *cq;
	u64 cq_addr;
	struct hl_cb *cb;
	u32 ctl;
	u32 len;
	u64 ptr;

	/*
	 * Update the JOB ID inside the BD CTL so the device would know what
	 * to write in the completion queue
	 */
	ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

	cb = job->patched_cb;
	len = job->job_cb_size;
	ptr = cb->bus_address;

	/* Skip completion flow in case this is a non completion CS */
	if (!cs_needs_completion(job->cs))
		goto submit_bd;

	cq_pkt.data = cpu_to_le32(
			((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
				& CQ_ENTRY_SHADOW_INDEX_MASK) |
			FIELD_PREP(CQ_ENTRY_SHADOW_INDEX_VALID_MASK, 1) |
			FIELD_PREP(CQ_ENTRY_READY_MASK, 1));

	/*
	 * No need to protect pi_offset because scheduling to the
	 * H/W queues is done under the scheduler mutex
	 *
	 * No need to check if CQ is full because it was already
	 * checked in ext_queue_sanity_checks
	 */
	cq = &hdev->completion_queue[q->cq_id];
	cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);

	hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
						cq_addr,
						le32_to_cpu(cq_pkt.data),
						q->msi_vec,
						job->contains_dma_pkt);

	q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

	cq->pi = hl_cq_inc_ptr(cq->pi);

submit_bd:
	ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
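
/*
 * The shadow queue mirrors the H/W queue: the job pointer is stored at the
 * same shadow index that is encoded into the BD and CQ entry, so the
 * completion interrupt handler can find the finished job directly.
 */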

/*
 * int_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 */
static void int_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_bd bd;
	__le64 *pi;

	bd.ctl = 0;
	bd.len = cpu_to_le32(job->job_cb_size);

	if (job->is_kernel_allocated_cb)
		/* bus_address is actually a mmu mapped address
		 * allocated from an internal pool
		 */
		bd.ptr = cpu_to_le64(job->user_cb->bus_address);
	else
		bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

	pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd);

	q->pi++;
	q->pi &= ((q->int_queue_len << 1) - 1);

	hdev->asic_funcs->pqe_write(hdev, pi, &bd);

	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
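
/*
 * Internal queues typically live in on-device memory returned by
 * get_int_queue_base(), so the BD is written through the ASIC-specific
 * pqe_write() callback instead of being filled in place like an external
 * queue BD.
 */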

/*
 * hw_queue_schedule_job - submit a JOB to a H/W queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 */
static void hw_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	u64 ptr;
	u32 offset, ctl, len;

	/*
	 * Upon PQE completion, COMP_DATA is used as the write data to the
	 * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
	 * write address offset in the SM block (QMAN LBW message).
	 * The write address offset is calculated as "COMP_OFFSET << 2".
	 */
	offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
	ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
		((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);

	len = job->job_cb_size;

	/*
	 * A patched CB is created only if a user CB was allocated by driver
	 * and MMU is disabled. If MMU is enabled, the user CB should be used
	 * instead. If the user CB wasn't allocated by driver, assume that it
	 * wasn't mapped by the MMU.
	 */
	if (job->patched_cb)
		ptr = job->patched_cb->bus_address;
	else if (job->is_kernel_allocated_cb)
		ptr = job->user_cb->bus_address;
	else
		ptr = (u64) (uintptr_t) job->user_cb;

	ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
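
/*
 * Illustrative example: with max_pending_cs == 64 and a CS sequence number
 * of 0x123, offset is 0x23, so upon completion the device writes COMP_DATA
 * at SM offset (0x23 << 2).
 */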

static void init_signal_cs(struct hl_device *hdev,
		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
	struct hl_sync_stream_properties *prop;
	struct hl_hw_sob *hw_sob;
	u32 q_idx;

	q_idx = job->hw_queue_id;
	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
	hw_sob = &prop->hw_sob[prop->curr_sob_offset];

	cs_cmpl->hw_sob = hw_sob;
	cs_cmpl->sob_val = prop->next_sob_val++;

	dev_dbg(hdev->dev,
		"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);

	/* we set an EB since we must make sure all operations are done
	 * when sending the signal
	 */
	hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
				cs_cmpl->hw_sob->sob_id, 0, true);

	kref_get(&hw_sob->kref);

	/* check for wraparound */
	if (prop->next_sob_val == HL_MAX_SOB_VAL) {
		/*
		 * Decrement as we reached the max value.
		 * The release function won't be called here as we've
		 * just incremented the refcount.
		 */
		kref_put(&hw_sob->kref, hl_sob_reset_error);
		prop->next_sob_val = 1;

		/* only two SOBs are currently in use */
		prop->curr_sob_offset =
				(prop->curr_sob_offset + 1) % HL_RSVD_SOBS;

		dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
				prop->curr_sob_offset, q_idx);
	}
}
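
/*
 * As noted above, each sync stream currently rotates between two reserved
 * SOBs, so on wraparound the stream simply switches to the other SOB and
 * restarts its signal value at 1.
 */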

static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
	struct hl_cs_compl *signal_cs_cmpl;
	struct hl_sync_stream_properties *prop;
	struct hl_gen_wait_properties wait_prop;
	u32 q_idx;

	q_idx = job->hw_queue_id;
	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	signal_cs_cmpl = container_of(cs->signal_fence,
					struct hl_cs_compl,
					base_fence);

	/* copy the SOB id and value of the signal CS */
	cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
	cs_cmpl->sob_val = signal_cs_cmpl->sob_val;

	dev_dbg(hdev->dev,
		"generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
		prop->base_mon_id, q_idx);

	wait_prop.data = (void *) job->patched_cb;
	wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
	wait_prop.sob_mask = 0x1;
	wait_prop.sob_val = cs_cmpl->sob_val;
	wait_prop.mon_id = prop->base_mon_id;
	wait_prop.q_idx = q_idx;

	hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);

	kref_get(&cs_cmpl->hw_sob->kref);

	/*
	 * Must put the signal fence after the SOB refcnt increment so
	 * the SOB refcnt won't turn 0 and reset the SOB before the
	 * wait CS was submitted.
	 */
	hl_fence_put(cs->signal_fence);
	cs->signal_fence = NULL;
}

/*
 * init_signal_wait_cs - initialize a signal/wait CS
 * @cs: pointer to the signal/wait CS
 *
 * H/W queues spinlock should be taken before calling this function
 */
static void init_signal_wait_cs(struct hl_cs *cs)
{
	struct hl_ctx *ctx = cs->ctx;
	struct hl_device *hdev = ctx->hdev;
	struct hl_cs_job *job;
	struct hl_cs_compl *cs_cmpl =
			container_of(cs->fence, struct hl_cs_compl, base_fence);

	/* There is only one job in a signal/wait CS */
	job = list_first_entry(&cs->job_list, struct hl_cs_job,
				cs_node);

	if (cs->type & CS_TYPE_SIGNAL)
		init_signal_cs(hdev, job, cs_cmpl);
	else if (cs->type & CS_TYPE_WAIT)
		init_wait_cs(hdev, cs, job, cs_cmpl);
}

/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 * @cs: pointer to the CS
 */
531 enum hl_device_status status;
532 struct hl_cs_counters_atomic *cntr;
533 struct hl_ctx *ctx = cs->ctx;
534 struct hl_device *hdev = ctx->hdev;
535 struct hl_cs_job *job, *tmp;
536 struct hl_hw_queue *q;
537 int rc = 0, i, cq_cnt;
541 cntr = &hdev->aggregated_cs_counters;
543 hdev->asic_funcs->hw_queues_lock(hdev);
545 if (!hl_device_operational(hdev, &status)) {
546 atomic64_inc(&cntr->device_in_reset_drop_cnt);
547 atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt);
549 "device is %s, CS rejected!\n", hdev->status[status]);
554 max_queues = hdev->asic_prop.max_queues;
556 q = &hdev->kernel_queues[0];
557 for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
558 if (cs->jobs_in_queue_cnt[i]) {
559 switch (q->queue_type) {
561 rc = ext_queue_sanity_checks(hdev, q,
562 cs->jobs_in_queue_cnt[i],
563 cs_needs_completion(cs) ?
567 rc = int_queue_sanity_checks(hdev, q,
568 cs->jobs_in_queue_cnt[i]);
571 rc = hw_queue_sanity_checks(hdev, q,
572 cs->jobs_in_queue_cnt[i]);
575 dev_err(hdev->dev, "Queue type %d is invalid\n",
583 &ctx->cs_counters.queue_full_drop_cnt);
584 atomic64_inc(&cntr->queue_full_drop_cnt);
588 if (q->queue_type == QUEUE_TYPE_EXT)
593 if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT))
594 init_signal_wait_cs(cs);
595 else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
596 hdev->asic_funcs->collective_wait_init_cs(cs);
598 spin_lock(&hdev->cs_mirror_lock);
600 /* Verify staged CS exists and add to the staged list */
601 if (cs->staged_cs && !cs->staged_first) {
602 struct hl_cs *staged_cs;
604 staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
607 "Cannot find staged submission sequence %llu",
608 cs->staged_sequence);
610 goto unlock_cs_mirror;
613 if (is_staged_cs_last_exists(hdev, staged_cs)) {
615 "Staged submission sequence %llu already submitted",
616 cs->staged_sequence);
618 goto unlock_cs_mirror;
621 list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);
624 list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
626 /* Queue TDR if the CS is the first entry and if timeout is wanted */
627 first_entry = list_first_entry(&hdev->cs_mirror_list,
628 struct hl_cs, mirror_node) == cs;
629 if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
630 first_entry && cs_needs_timeout(cs)) {
631 cs->tdr_active = true;
632 schedule_delayed_work(&cs->work_tdr, cs->timeout_jiffies);
636 spin_unlock(&hdev->cs_mirror_lock);
638 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
639 switch (job->queue_type) {
641 ext_queue_schedule_job(job);
644 int_queue_schedule_job(job);
647 hw_queue_schedule_job(job);
653 cs->submitted = true;
658 spin_unlock(&hdev->cs_mirror_lock);
660 q = &hdev->kernel_queues[0];
661 for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
662 if ((q->queue_type == QUEUE_TYPE_EXT) &&
663 (cs->jobs_in_queue_cnt[i])) {
664 atomic_t *free_slots =
665 &hdev->completion_queue[i].free_slots_cnt;
666 atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
672 hdev->asic_funcs->hw_queues_unlock(hdev);
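
/*
 * Unwind order above: CQ entries reserved during the sanity-check pass are
 * returned in unroll_cq_resv for every external queue that had jobs in this
 * CS, and only then is the H/W queues lock released.
 */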

/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: which queue to increment its ci
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

	atomic_inc(&q->ci);
}

static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
					bool is_cpu_queue)
{
	void *p;
	int rc;

	if (is_cpu_queue)
		p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
				HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
	else
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
				HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
				GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = p;

	q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
					sizeof(*q->shadow_queue),
					GFP_KERNEL);
	if (!q->shadow_queue) {
		dev_err(hdev->dev,
			"Failed to allocate shadow queue for H/W queue %d\n",
			q->hw_queue_id);
		rc = -ENOMEM;
		goto free_queue;
	}

	/* Make sure read/write pointers are initialized to start of queue */
	atomic_set(&q->ci, 0);
	q->pi = 0;

	return 0;

free_queue:
	if (is_cpu_queue)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
				HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
				HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
				q->bus_address);

	return rc;
}

static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
					&q->bus_address, &q->int_queue_len);
	if (!p) {
		dev_err(hdev->dev,
			"Failed to get base address for internal queue %d\n",
			q->hw_queue_id);
		return -EFAULT;
	}

	q->kernel_address = p;
	q->pi = 0;
	atomic_set(&q->ci, 0);

	return 0;
}

static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, true);
}

static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, false);
}

static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
				HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
				GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = p;

	/* Make sure read/write pointers are initialized to start of queue */
	atomic_set(&q->ci, 0);
	q->pi = 0;

	return 0;
}

static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
{
	struct hl_sync_stream_properties *sync_stream_prop;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_hw_sob *hw_sob;
	int sob, reserved_mon_idx, queue_idx;

	sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	/* We use 'collective_mon_idx' as a running index in order to reserve
	 * monitors for collective master/slave queues.
	 * collective master queue gets 2 reserved monitors
	 * collective slave queue gets 1 reserved monitor
	 */
	if (hdev->kernel_queues[q_idx].collective_mode ==
			HL_COLLECTIVE_MASTER) {
		reserved_mon_idx = hdev->collective_mon_idx;

		/* reserve the first monitor for collective master queue */
		sync_stream_prop->collective_mstr_mon_id[0] =
			prop->collective_first_mon + reserved_mon_idx;

		/* reserve the second monitor for collective master queue */
		sync_stream_prop->collective_mstr_mon_id[1] =
			prop->collective_first_mon + reserved_mon_idx + 1;

		hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS;
	} else if (hdev->kernel_queues[q_idx].collective_mode ==
			HL_COLLECTIVE_SLAVE) {
		reserved_mon_idx = hdev->collective_mon_idx++;

		/* reserve a monitor for collective slave queue */
		sync_stream_prop->collective_slave_mon_id =
			prop->collective_first_mon + reserved_mon_idx;
	}

	if (!hdev->kernel_queues[q_idx].supports_sync_stream)
		return;

	queue_idx = hdev->sync_stream_queue_idx++;

	sync_stream_prop->base_sob_id = prop->sync_stream_first_sob +
			(queue_idx * HL_RSVD_SOBS);
	sync_stream_prop->base_mon_id = prop->sync_stream_first_mon +
			(queue_idx * HL_RSVD_MONS);
	sync_stream_prop->next_sob_val = 1;
	sync_stream_prop->curr_sob_offset = 0;

	for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
		hw_sob = &sync_stream_prop->hw_sob[sob];
		hw_sob->hdev = hdev;
		hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
		hw_sob->q_idx = q_idx;
		kref_init(&hw_sob->kref);
	}
}
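
/*
 * Illustrative layout: the N-th sync-stream-capable queue gets SOBs starting
 * at sync_stream_first_sob + N * HL_RSVD_SOBS and monitors starting at
 * sync_stream_first_mon + N * HL_RSVD_MONS, so streams never share SOBs or
 * monitors.
 */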

static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
{
	struct hl_sync_stream_properties *prop =
			&hdev->kernel_queues[q_idx].sync_stream_prop;

	/*
	 * In case we got here due to a stuck CS, the refcnt might be bigger
	 * than 1 and therefore we reset it.
	 */
	kref_init(&prop->hw_sob[prop->curr_sob_offset].kref);
	prop->curr_sob_offset = 0;
	prop->next_sob_val = 1;
}

/*
 * queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
			u32 hw_queue_id)
{
	int rc;

	q->hw_queue_id = hw_queue_id;

	switch (q->queue_type) {
	case QUEUE_TYPE_EXT:
		rc = ext_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_INT:
		rc = int_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_CPU:
		rc = cpu_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_HW:
		rc = hw_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_NA:
		q->valid = 0;
		return 0;
	default:
		dev_crit(hdev->dev, "wrong queue type %d during init\n",
			q->queue_type);
		rc = -EINVAL;
		break;
	}

	sync_stream_queue_init(hdev, q->hw_queue_id);

	if (rc)
		return rc;

	q->valid = 1;

	return 0;
}

/*
 * queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 *
 * Free the queue memory
 */
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
	if (!q->valid)
		return;

	/*
	 * If we arrived here, there are no jobs waiting on this queue
	 * so we can safely remove it.
	 * This is because this function can only be called when:
	 * 1. Either a context is deleted, which only can occur if all its
	 *    jobs were finished
	 * 2. A context wasn't able to be created due to failure or timeout,
	 *    which means there are no jobs on the queue yet
	 *
	 * The only exception are the queues of the kernel context, but
	 * if they are being destroyed, it means that the entire module is
	 * being removed. If the module is removed, it means there is no open
	 * user context. It also means that if a job was submitted by
	 * the kernel driver (e.g. context creation), the job itself was
	 * released by the kernel driver when a timeout occurred on its
	 * completion. Thus, we don't need to release it again.
	 */

	if (q->queue_type == QUEUE_TYPE_INT)
		return;

	kfree(q->shadow_queue);

	if (q->queue_type == QUEUE_TYPE_CPU)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
				HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
				HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
				q->bus_address);
}

int hl_hw_queues_create(struct hl_device *hdev)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hl_hw_queue *q;
	int i, rc, q_ready_cnt;

	hdev->kernel_queues = kcalloc(asic->max_queues,
				sizeof(*hdev->kernel_queues), GFP_KERNEL);

	if (!hdev->kernel_queues) {
		dev_err(hdev->dev, "Not enough memory for H/W queues\n");
		return -ENOMEM;
	}

	/* Initialize the H/W queues */
	for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
			i < asic->max_queues ; i++, q_ready_cnt++, q++) {

		q->queue_type = asic->hw_queues_props[i].type;
		q->supports_sync_stream =
				asic->hw_queues_props[i].supports_sync_stream;
		q->collective_mode = asic->hw_queues_props[i].collective_mode;

		rc = queue_init(hdev, q, i);
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize queue %d\n", i);
			goto release_queues;
		}
	}

	return 0;

release_queues:
	for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);

	return rc;
}

void hl_hw_queues_destroy(struct hl_device *hdev)
{
	struct hl_hw_queue *q;
	u32 max_queues = hdev->asic_prop.max_queues;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);
}

void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
	struct hl_hw_queue *q;
	u32 max_queues = hdev->asic_prop.max_queues;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
		if ((!q->valid) ||
			((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
			continue;

		q->pi = 0;
		atomic_set(&q->ci, 0);

		if (q->supports_sync_stream)
			sync_stream_queue_reset(hdev, q->hw_queue_id);
	}
}