// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_submit.h"

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_klvs_abi.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gpu_scheduler.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_trace.h"
#include "xe_vm.h"

static struct xe_guc *
exec_queue_to_guc(struct xe_exec_queue *q)
{
	return &q->gt->uc.guc;
}

/*
 * Helpers for engine state, using an atomic as some of the bits can transition
 * at the same time (e.g. a suspend can be happening at the same time as a
 * schedule engine done message being processed).
 */
#define EXEC_QUEUE_STATE_REGISTERED		(1 << 0)
#define ENGINE_STATE_ENABLED			(1 << 1)
#define EXEC_QUEUE_STATE_PENDING_ENABLE		(1 << 2)
#define EXEC_QUEUE_STATE_PENDING_DISABLE	(1 << 3)
#define EXEC_QUEUE_STATE_DESTROYED		(1 << 4)
#define ENGINE_STATE_SUSPENDED			(1 << 5)
#define EXEC_QUEUE_STATE_RESET			(1 << 6)
#define ENGINE_STATE_KILLED			(1 << 7)
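
/*
 * Meaning of the individual state bits, as used by the helpers below:
 * REGISTERED - context is registered with the GuC,
 * ENABLED - GuC scheduling is enabled on the context,
 * PENDING_ENABLE / PENDING_DISABLE - a scheduling enable/disable H2G has been
 * sent and its G2H acknowledgment is still outstanding,
 * DESTROYED - deregistration has been initiated,
 * SUSPENDED - the queue is suspended,
 * RESET - the context was hit by an engine reset,
 * KILLED - the queue was killed.
 */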

static bool exec_queue_registered(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
}

static void set_exec_queue_registered(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
}

static void clear_exec_queue_registered(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
}

static bool exec_queue_enabled(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & ENGINE_STATE_ENABLED;
}

static void set_exec_queue_enabled(struct xe_exec_queue *q)
{
	atomic_or(ENGINE_STATE_ENABLED, &q->guc->state);
}

static void clear_exec_queue_enabled(struct xe_exec_queue *q)
{
	atomic_and(~ENGINE_STATE_ENABLED, &q->guc->state);
}

static bool exec_queue_pending_enable(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
}

static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
}

static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
}

static bool exec_queue_pending_disable(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
}

static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
}

static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
}

static bool exec_queue_destroyed(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
}

static void set_exec_queue_destroyed(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
}

static bool exec_queue_banned(struct xe_exec_queue *q)
{
	return (q->flags & EXEC_QUEUE_FLAG_BANNED);
}

static void set_exec_queue_banned(struct xe_exec_queue *q)
{
	q->flags |= EXEC_QUEUE_FLAG_BANNED;
}

static bool exec_queue_suspended(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & ENGINE_STATE_SUSPENDED;
}

static void set_exec_queue_suspended(struct xe_exec_queue *q)
{
	atomic_or(ENGINE_STATE_SUSPENDED, &q->guc->state);
}

static void clear_exec_queue_suspended(struct xe_exec_queue *q)
{
	atomic_and(~ENGINE_STATE_SUSPENDED, &q->guc->state);
}

static bool exec_queue_reset(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
}

static void set_exec_queue_reset(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
}

static bool exec_queue_killed(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & ENGINE_STATE_KILLED;
}

static void set_exec_queue_killed(struct xe_exec_queue *q)
{
	atomic_or(ENGINE_STATE_KILLED, &q->guc->state);
}

static bool exec_queue_killed_or_banned(struct xe_exec_queue *q)
{
	return exec_queue_killed(q) || exec_queue_banned(q);
}
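
/*
 * With CONFIG_PROVE_LOCKING, each GuC keeps a small pool of ordered
 * workqueues and hands them out round-robin (see get_submit_wq()) rather than
 * sharing a single ordered workqueue between all exec queue schedulers;
 * presumably this keeps lockdep from chaining unrelated schedulers together.
 * Without lockdep the stubs below are no-ops.
 */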

#ifdef CONFIG_PROVE_LOCKING
static int alloc_submit_wq(struct xe_guc *guc)
{
	int i;

	for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
		guc->submission_state.submit_wq_pool[i] =
			alloc_ordered_workqueue("submit_wq", 0);
		if (!guc->submission_state.submit_wq_pool[i])
			goto err_free;
	}

	return 0;

err_free:
	while (i)
		destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);

	return -ENOMEM;
}

static void free_submit_wq(struct xe_guc *guc)
{
	int i;

	for (i = 0; i < NUM_SUBMIT_WQ; ++i)
		destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
}

static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
{
	int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;

	return guc->submission_state.submit_wq_pool[idx];
}
#else
static int alloc_submit_wq(struct xe_guc *guc)
{
	return 0;
}

static void free_submit_wq(struct xe_guc *guc)
{
}

static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
{
	return NULL;
}
#endif

static void guc_submit_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc *guc = arg;

	xa_destroy(&guc->submission_state.exec_queue_lookup);
	ida_destroy(&guc->submission_state.guc_ids);
	bitmap_free(guc->submission_state.guc_ids_bitmap);
	free_submit_wq(guc);
	mutex_destroy(&guc->submission_state.lock);
}

#define GUC_ID_MAX		65535
#define GUC_ID_NUMBER_MLRC	4096
#define GUC_ID_NUMBER_SLRC	(GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
#define GUC_ID_START_MLRC	GUC_ID_NUMBER_SLRC
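
/*
 * guc_id space: single-LRC (non-parallel) queues take ids from an IDA in
 * [0, GUC_ID_NUMBER_SLRC), while parallel (multi-LRC) queues carve a
 * power-of-two region out of a bitmap and are offset by GUC_ID_START_MLRC,
 * so each LRC of a wide queue gets a consecutive id below GUC_ID_MAX.
 */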

static const struct xe_exec_queue_ops guc_exec_queue_ops;

static void primelockdep(struct xe_guc *guc)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_lock(&guc->submission_state.lock);
	might_lock(&guc->submission_state.suspend.lock);
	mutex_unlock(&guc->submission_state.lock);

	fs_reclaim_release(GFP_KERNEL);
}

int xe_guc_submit_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int err;

	guc->submission_state.guc_ids_bitmap =
		bitmap_zalloc(GUC_ID_NUMBER_MLRC, GFP_KERNEL);
	if (!guc->submission_state.guc_ids_bitmap)
		return -ENOMEM;

	err = alloc_submit_wq(guc);
	if (err) {
		bitmap_free(guc->submission_state.guc_ids_bitmap);
		return err;
	}

	gt->exec_queue_ops = &guc_exec_queue_ops;

	mutex_init(&guc->submission_state.lock);
	xa_init(&guc->submission_state.exec_queue_lookup);
	ida_init(&guc->submission_state.guc_ids);

	spin_lock_init(&guc->submission_state.suspend.lock);
	guc->submission_state.suspend.context = dma_fence_context_alloc(1);

	primelockdep(guc);

	err = drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
	if (err)
		return err;

	return 0;
}

static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
	int i;

	lockdep_assert_held(&guc->submission_state.lock);

	for (i = 0; i < xa_count; ++i)
		xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);

	if (xe_exec_queue_is_parallel(q))
		bitmap_release_region(guc->submission_state.guc_ids_bitmap,
				      q->guc->id - GUC_ID_START_MLRC,
				      order_base_2(q->width));
	else
		ida_free(&guc->submission_state.guc_ids, q->guc->id);
}

static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
	int ret;
	void *ptr;
	int i;

	/*
	 * Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
	 * worst case the user gets -ENOMEM on engine create and has to try
	 * again.
	 *
	 * FIXME: Have caller pre-alloc or post-alloc /w GFP_KERNEL to prevent
	 * failure.
	 */
	lockdep_assert_held(&guc->submission_state.lock);

	if (xe_exec_queue_is_parallel(q)) {
		void *bitmap = guc->submission_state.guc_ids_bitmap;

		ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
					      order_base_2(q->width));
	} else {
		ret = ida_alloc_max(&guc->submission_state.guc_ids,
				    GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT);
	}
	if (ret < 0)
		return ret;

	q->guc->id = ret;
	if (xe_exec_queue_is_parallel(q))
		q->guc->id += GUC_ID_START_MLRC;

	for (i = 0; i < q->width; ++i) {
		ptr = xa_store(&guc->submission_state.exec_queue_lookup,
			       q->guc->id + i, q, GFP_NOWAIT);
		if (IS_ERR(ptr)) {
			ret = PTR_ERR(ptr);
			goto err_release;
		}
	}

	return 0;

err_release:
	__release_guc_id(guc, q, i);

	return ret;
}

static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
	mutex_lock(&guc->submission_state.lock);
	__release_guc_id(guc, q, q->width);
	mutex_unlock(&guc->submission_state.lock);
}

struct exec_queue_policy {
	u32 count;
	struct guc_update_exec_queue_policy h2g;
};

static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
{
	size_t bytes = sizeof(policy->h2g.header) +
		       (sizeof(policy->h2g.klv[0]) * policy->count);

	return bytes / sizeof(u32);
}

static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
					      u16 guc_id)
{
	policy->h2g.header.action =
		XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
	policy->h2g.header.guc_id = guc_id;
	policy->count = 0;
}

#define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
					       u32 data) \
{ \
	XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
\
	policy->h2g.klv[policy->count].kl = \
		FIELD_PREP(GUC_KLV_0_KEY, \
			   GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
		FIELD_PREP(GUC_KLV_0_LEN, 1); \
	policy->h2g.klv[policy->count].value = data; \
	policy->count++; \
}

MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
#undef MAKE_EXEC_QUEUE_POLICY_ADD
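
/*
 * The helpers above build a single HOST2GUC_UPDATE_CONTEXT_POLICIES message:
 * each __guc_exec_queue_policy_add_*() call appends one KLV (key/length/value)
 * entry, and __guc_exec_queue_policy_action_size() converts the accumulated
 * header plus KLV payload into the dword count expected by xe_guc_ct_send().
 */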

static const int xe_exec_queue_prio_to_guc[] = {
	[XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
	[XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
	[XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
	[XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
};

static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
	struct exec_queue_policy policy;
	struct xe_device *xe = guc_to_xe(guc);
	enum xe_exec_queue_priority prio = q->sched_props.priority;
	u32 timeslice_us = q->sched_props.timeslice_us;
	u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;

	xe_assert(xe, exec_queue_registered(q));

	__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
	__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
	__guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
	__guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);

	xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
		       __guc_exec_queue_policy_action_size(&policy), 0, 0);
}

static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
{
	struct exec_queue_policy policy;

	__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
	__guc_exec_queue_policy_add_preemption_timeout(&policy, 1);

	xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
		       __guc_exec_queue_policy_action_size(&policy), 0, 0);
}

#define parallel_read(xe_, map_, field_) \
	xe_map_rd_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
			field_)
#define parallel_write(xe_, map_, field_, val_) \
	xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
			field_, val_)
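
/*
 * parallel_read()/parallel_write() access struct guc_submit_parallel_scratch
 * through the iosys_map returned by xe_lrc_parallel_map(); that scratch page
 * holds the GuC work queue descriptor and the work queue ring used for
 * multi-LRC (parallel) submission below.
 */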

static void __register_mlrc_engine(struct xe_guc *guc,
				   struct xe_exec_queue *q,
				   struct guc_ctxt_registration_info *info)
{
#define MAX_MLRC_REG_SIZE	(13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
	struct xe_device *xe = guc_to_xe(guc);
	u32 action[MAX_MLRC_REG_SIZE];
	int len = 0;
	int i;

	xe_assert(xe, xe_exec_queue_is_parallel(q));

	action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
	action[len++] = info->flags;
	action[len++] = info->context_idx;
	action[len++] = info->engine_class;
	action[len++] = info->engine_submit_mask;
	action[len++] = info->wq_desc_lo;
	action[len++] = info->wq_desc_hi;
	action[len++] = info->wq_base_lo;
	action[len++] = info->wq_base_hi;
	action[len++] = info->wq_size;
	action[len++] = q->width;
	action[len++] = info->hwlrca_lo;
	action[len++] = info->hwlrca_hi;

	for (i = 1; i < q->width; ++i) {
		struct xe_lrc *lrc = q->lrc + i;

		action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
		action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
	}

	xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE

	xe_guc_ct_send(&guc->ct, action, len, 0, 0);
}

static void __register_engine(struct xe_guc *guc,
			      struct guc_ctxt_registration_info *info)
{
	u32 action[] = {
		XE_GUC_ACTION_REGISTER_CONTEXT,
		info->flags,
		info->context_idx,
		info->engine_class,
		info->engine_submit_mask,
		info->wq_desc_lo,
		info->wq_desc_hi,
		info->wq_base_lo,
		info->wq_base_hi,
		info->wq_size,
		info->hwlrca_lo,
		info->hwlrca_hi,
	};

	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void register_engine(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_lrc *lrc = q->lrc;
	struct guc_ctxt_registration_info info;

	xe_assert(xe, !exec_queue_registered(q));

	memset(&info, 0, sizeof(info));
	info.context_idx = q->guc->id;
	info.engine_class = xe_engine_class_to_guc_class(q->class);
	info.engine_submit_mask = q->logical_mask;
	info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
	info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
	info.flags = CONTEXT_REGISTRATION_FLAG_KMD;

	if (xe_exec_queue_is_parallel(q)) {
		u32 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
		struct iosys_map map = xe_lrc_parallel_map(lrc);

		info.wq_desc_lo = lower_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq_desc));
		info.wq_desc_hi = upper_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq_desc));
		info.wq_base_lo = lower_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq[0]));
		info.wq_base_hi = upper_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq[0]));
		info.wq_size = WQ_SIZE;

		q->guc->wqi_head = 0;
		q->guc->wqi_tail = 0;
		xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
		parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
	}

	/*
	 * We must keep a reference for LR engines while they are registered
	 * with the GuC: their jobs signal immediately, and an engine can't be
	 * destroyed while the GuC still holds a reference to it.
	 */
	if (xe_exec_queue_is_lr(q))
		xe_exec_queue_get(q);

	set_exec_queue_registered(q);
	trace_xe_exec_queue_register(q);
	if (xe_exec_queue_is_parallel(q))
		__register_mlrc_engine(guc, q, &info);
	else
		__register_engine(guc, &info);
	init_policies(guc, q);
}

static u32 wq_space_until_wrap(struct xe_exec_queue *q)
{
	return (WQ_SIZE - q->guc->wqi_tail);
}

static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
	unsigned int sleep_period_ms = 1;

#define AVAILABLE_SPACE \
	CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
	if (wqi_size > AVAILABLE_SPACE) {
try_again:
		q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
		if (wqi_size > AVAILABLE_SPACE) {
			if (sleep_period_ms == 1024) {
				xe_gt_reset_async(q->gt);
				return -ENODEV;
			}

			msleep(sleep_period_ms);
			sleep_period_ms <<= 1;
			goto try_again;
		}
	}
#undef AVAILABLE_SPACE

	return 0;
}

static int wq_noop_append(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
	u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;

	if (wq_wait_for_space(q, wq_space_until_wrap(q)))
		return -ENODEV;

	xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));

	parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
		       FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
		       FIELD_PREP(WQ_LEN_MASK, len_dw));
	q->guc->wqi_tail = 0;

	return 0;
}
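
/*
 * A multi-LRC work queue item (WQI), as written by wq_item_append() below,
 * consists of a header dword carrying WQ_TYPE_MULTI_LRC and the length in
 * dwords minus one, the parent LRC descriptor, a dword packing the guc_id
 * with the parent ring tail, and then one ring-tail dword per child LRC.
 */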

static void wq_item_append(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
#define WQ_HEADER_SIZE	4	/* Includes 1 LRC address too */
	u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
	u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
	u32 len_dw = (wqi_size / sizeof(u32)) - 1;
	int i = 0, j;

	if (wqi_size > wq_space_until_wrap(q)) {
		if (wq_noop_append(q))
			return;
	}
	if (wq_wait_for_space(q, wqi_size))
		return;

	wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
		FIELD_PREP(WQ_LEN_MASK, len_dw);
	wqi[i++] = xe_lrc_descriptor(q->lrc);
	wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
		FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc->ring.tail / sizeof(u64));
	wqi[i++] = 0;
	for (j = 1; j < q->width; ++j) {
		struct xe_lrc *lrc = q->lrc + j;

		wqi[i++] = lrc->ring.tail / sizeof(u64);
	}

	xe_assert(xe, i == wqi_size / sizeof(u32));

	iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
				      wq[q->guc->wqi_tail / sizeof(u32)]));
	xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
	q->guc->wqi_tail += wqi_size;
	xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);

	xe_device_wmb(xe);

	map = xe_lrc_parallel_map(q->lrc);
	parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
}

#define RESUME_PENDING	~0x0ull
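
/*
 * First submission on a queue turns GuC scheduling on for the context
 * (PENDING_ENABLE until the G2H ack arrives); parallel queues additionally
 * send a follow-up SCHED_CONTEXT so the work queue item just appended gets
 * picked up. Subsequent submissions only need the SCHED_CONTEXT poke.
 */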

static void submit_exec_queue(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_lrc *lrc = q->lrc;
	u32 action[3];
	u32 g2h_len = 0;
	u32 num_g2h = 0;
	int len = 0;
	bool extra_submit = false;

	xe_assert(xe, exec_queue_registered(q));

	if (xe_exec_queue_is_parallel(q))
		wq_item_append(q);
	else
		xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);

	if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
		return;

	if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
		action[len++] = q->guc->id;
		action[len++] = GUC_CONTEXT_ENABLE;
		g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
		num_g2h = 1;
		if (xe_exec_queue_is_parallel(q))
			extra_submit = true;

		q->guc->resume_time = RESUME_PENDING;
		set_exec_queue_pending_enable(q);
		set_exec_queue_enabled(q);
		trace_xe_exec_queue_scheduling_enable(q);
	} else {
		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
		action[len++] = q->guc->id;
		trace_xe_exec_queue_submit(q);
	}

	xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);

	if (extra_submit) {
		len = 0;
		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
		action[len++] = q->guc->id;
		trace_xe_exec_queue_submit(q);

		xe_guc_ct_send(&guc->ct, action, len, 0, 0);
	}
}

static struct dma_fence *
guc_exec_queue_run_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);
	struct xe_exec_queue *q = job->q;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	bool lr = xe_exec_queue_is_lr(q);

	xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
		  exec_queue_banned(q) || exec_queue_suspended(q));

	trace_xe_sched_job_run(job);

	if (!exec_queue_killed_or_banned(q) && !xe_sched_job_is_error(job)) {
		if (!exec_queue_registered(q))
			register_engine(q);
		if (!lr)	/* LR jobs are emitted in the exec IOCTL */
			q->ring_ops->emit_job(job);
		submit_exec_queue(q);
	}

	if (lr) {
		xe_sched_job_set_error(job, -EOPNOTSUPP);
		return NULL;
	} else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
		return job->fence;
	} else {
		return dma_fence_get(job->fence);
	}
}

static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);

	trace_xe_sched_job_free(job);
	xe_sched_job_put(job);
}

static int guc_read_stopped(struct xe_guc *guc)
{
	return atomic_read(&guc->submission_state.stopped);
}

#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable)			\
	u32 action[] = {						\
		XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,			\
		q->guc->id,						\
		GUC_CONTEXT_##enable_disable,				\
	}

static void disable_scheduling_deregister(struct xe_guc *guc,
					  struct xe_exec_queue *q)
{
	MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
	struct xe_device *xe = guc_to_xe(guc);
	int ret;

	set_min_preemption_timeout(guc, q);

	ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
				 guc_read_stopped(guc), HZ * 5);
	if (!ret) {
		struct xe_gpu_scheduler *sched = &q->guc->sched;

		drm_warn(&xe->drm, "Pending enable failed to respond");
		xe_sched_submission_start(sched);
		xe_gt_reset_async(q->gt);
		xe_sched_tdr_queue_imm(sched);
		return;
	}

	clear_exec_queue_enabled(q);
	set_exec_queue_pending_disable(q);
	set_exec_queue_destroyed(q);
	trace_xe_exec_queue_scheduling_disable(q);

	/*
	 * Reserve space for both G2H here as the 2nd G2H is sent from a G2H
	 * handler and we are not allowed to reserve G2H space in handlers.
	 */
	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET +
		       G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
}

static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p);

#if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
static void simple_error_capture(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct drm_printer p = drm_err_printer(&xe->drm, NULL);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 adj_logical_mask = q->logical_mask;
	u32 width_mask = (0x1 << q->width) - 1;
	int i;
	bool cookie;

	if (q->vm && !q->vm->error_capture.capture_once) {
		q->vm->error_capture.capture_once = true;
		cookie = dma_fence_begin_signalling();
		for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
			if (adj_logical_mask & BIT(i)) {
				adj_logical_mask |= width_mask << i;
				i += q->width;
			} else {
				++i;
			}
		}

		xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
		xe_guc_ct_print(&guc->ct, &p, true);
		guc_exec_queue_print(q, &p);
		for_each_hw_engine(hwe, guc_to_gt(guc), id) {
			if (hwe->class != q->hwe->class ||
			    !(BIT(hwe->logical_instance) & adj_logical_mask))
				continue;
			xe_hw_engine_print(hwe, &p);
		}
		xe_analyze_vm(&p, q->vm, q->gt->info.id);
		xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
		dma_fence_end_signalling(cookie);
	}
}
#else
static void simple_error_capture(struct xe_exec_queue *q)
{
}
#endif

static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	/* Wake up the xe_wait_user_fence ioctl if the exec queue is reset */
	wake_up_all(&xe->ufence_wq);

	if (xe_exec_queue_is_lr(q))
		queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
	else
		xe_sched_tdr_queue_imm(&q->guc->sched);
}

static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
{
	struct xe_guc_exec_queue *ge =
		container_of(w, struct xe_guc_exec_queue, lr_tdr);
	struct xe_exec_queue *q = ge->q;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gpu_scheduler *sched = &ge->sched;

	xe_assert(xe, xe_exec_queue_is_lr(q));
	trace_xe_exec_queue_lr_cleanup(q);

	/* Kill the run_job / process_msg entry points */
	xe_sched_submission_stop(sched);

	/*
	 * Engine state now mostly stable, disable scheduling / deregister if
	 * needed. This cleanup routine might be called multiple times, where
	 * the actual async engine deregister drops the final engine ref.
	 * Calling disable_scheduling_deregister will mark the engine as
	 * destroyed and fire off the CT requests to disable scheduling /
	 * deregister, which we only want to do once. We also don't want to
	 * mark the engine as pending_disable again, as this may race with the
	 * xe_guc_deregister_done_handler() which treats it as an unexpected
	 * state.
	 */
	if (exec_queue_registered(q) && !exec_queue_destroyed(q)) {
		struct xe_guc *guc = exec_queue_to_guc(q);
		int ret;

		set_exec_queue_banned(q);
		disable_scheduling_deregister(guc, q);

		/*
		 * Must wait for scheduling to be disabled before signalling
		 * any fences; if the GT is broken, the GT reset code should
		 * signal us.
		 */
		ret = wait_event_timeout(guc->ct.wq,
					 !exec_queue_pending_disable(q) ||
					 guc_read_stopped(guc), HZ * 5);
		if (!ret) {
			drm_warn(&xe->drm, "Schedule disable failed to respond");
			xe_sched_submission_start(sched);
			xe_gt_reset_async(q->gt);
			return;
		}
	}

	xe_sched_submission_start(sched);
}

static enum drm_gpu_sched_stat
guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);
	struct xe_sched_job *tmp_job;
	struct xe_exec_queue *q = job->q;
	struct xe_gpu_scheduler *sched = &q->guc->sched;
	struct xe_device *xe = guc_to_xe(exec_queue_to_guc(q));
	int err = -ETIME;
	int i = 0;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
		drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
			   xe_sched_job_seqno(job), q->guc->id, q->flags);
		xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
			   "Kernel-submitted job timed out\n");
		xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
			   "VM job timed out on non-killed execqueue\n");

		simple_error_capture(q);
		xe_devcoredump(job);
	} else {
		drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
			xe_sched_job_seqno(job), q->guc->id, q->flags);
	}
	trace_xe_sched_job_timedout(job);

	/* Kill the run_job entry point */
	xe_sched_submission_stop(sched);

	/*
	 * Kernel jobs should never fail, nor should VM jobs; if they do,
	 * something has gone wrong and the GT needs a reset.
	 */
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
	    (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))) {
		if (!xe_sched_invalidate_job(job, 2)) {
			xe_sched_add_pending_job(sched, job);
			xe_sched_submission_start(sched);
			xe_gt_reset_async(q->gt);
			return DRM_GPU_SCHED_STAT_NOMINAL;
		}
	}

	/* Engine state now stable, disable scheduling if needed */
	if (exec_queue_registered(q)) {
		struct xe_guc *guc = exec_queue_to_guc(q);
		int ret;

		if (exec_queue_reset(q))
			err = -EIO;
		set_exec_queue_banned(q);
		if (!exec_queue_destroyed(q)) {
			xe_exec_queue_get(q);
			disable_scheduling_deregister(guc, q);
		}

		/*
		 * Must wait for scheduling to be disabled before signalling
		 * any fences; if the GT is broken, the GT reset code should
		 * signal us.
		 *
		 * FIXME: Tests can generate a ton of 0x6000 (IOMMU CAT fault
		 * error) messages which can cause the schedule disable to get
		 * lost. If this occurs, trigger a GT reset to recover.
		 */
		ret = wait_event_timeout(guc->ct.wq,
					 !exec_queue_pending_disable(q) ||
					 guc_read_stopped(guc), HZ * 5);
		if (!ret || guc_read_stopped(guc)) {
			drm_warn(&xe->drm, "Schedule disable failed to respond");
			xe_sched_add_pending_job(sched, job);
			xe_sched_submission_start(sched);
			xe_gt_reset_async(q->gt);
			xe_sched_tdr_queue_imm(sched);
			return DRM_GPU_SCHED_STAT_NOMINAL;
		}
	}

	/* Stop fence signaling */
	xe_hw_fence_irq_stop(q->fence_irq);

	/*
	 * Fence state now stable, stop / start scheduler which cleans up any
	 * fences that are complete.
	 */
	xe_sched_add_pending_job(sched, job);
	xe_sched_submission_start(sched);
	xe_guc_exec_queue_trigger_cleanup(q);

	/* Mark all outstanding jobs as bad, thus completing them */
	spin_lock(&sched->base.job_list_lock);
	list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
		xe_sched_job_set_error(tmp_job, !i++ ? err : -ECANCELED);
	spin_unlock(&sched->base.job_list_lock);

	/* Start fence signaling */
	xe_hw_fence_irq_start(q->fence_irq);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void __guc_exec_queue_fini_async(struct work_struct *w)
{
	struct xe_guc_exec_queue *ge =
		container_of(w, struct xe_guc_exec_queue, fini_async);
	struct xe_exec_queue *q = ge->q;
	struct xe_guc *guc = exec_queue_to_guc(q);

	trace_xe_exec_queue_destroy(q);

	if (xe_exec_queue_is_lr(q))
		cancel_work_sync(&ge->lr_tdr);
	release_guc_id(guc, q);
	xe_sched_entity_fini(&ge->entity);
	xe_sched_fini(&ge->sched);

	kfree(ge);
	xe_exec_queue_fini(q);
}

static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
{
	INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);

	/* We must block on kernel engines so slabs are empty on driver unload */
	if (q->flags & EXEC_QUEUE_FLAG_PERMANENT)
		__guc_exec_queue_fini_async(&q->guc->fini_async);
	else
		queue_work(system_wq, &q->guc->fini_async);
}

static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
{
	/*
	 * Might be done from within the GPU scheduler, need to do async as we
	 * fini the scheduler when the engine is fini'd, and the scheduler
	 * can't complete its own fini from within itself (circular
	 * dependency). Async resolves this; we don't really care when
	 * everything is fini'd, just that it eventually is.
	 */
	guc_exec_queue_fini_async(q);
}

static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
	trace_xe_exec_queue_cleanup_entity(q);

	if (exec_queue_registered(q))
		disable_scheduling_deregister(guc, q);
	else
		__guc_exec_queue_fini(guc, q);
}

static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
{
	return !exec_queue_killed_or_banned(q) && exec_queue_registered(q);
}

static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);

	if (guc_exec_queue_allowed_to_change_state(q))
		init_policies(guc, q);
	kfree(msg);
}

static void suspend_fence_signal(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
		  guc_read_stopped(guc));
	xe_assert(xe, q->guc->suspend_pending);

	q->guc->suspend_pending = false;

	wake_up(&q->guc->suspend_wait);
}

static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);

	if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
	    exec_queue_enabled(q)) {
		wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
			   guc_read_stopped(guc));

		if (!guc_read_stopped(guc)) {
			MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
			s64 since_resume_ms =
				ktime_ms_delta(ktime_get(),
					       q->guc->resume_time);
			s64 wait_ms = q->vm->preempt.min_run_period_ms -
				since_resume_ms;

			if (wait_ms > 0 && q->guc->resume_time)
				msleep(wait_ms);

			set_exec_queue_suspended(q);
			clear_exec_queue_enabled(q);
			set_exec_queue_pending_disable(q);
			trace_xe_exec_queue_scheduling_disable(q);

			xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
				       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
		}
	} else if (q->guc->suspend_pending) {
		set_exec_queue_suspended(q);
		suspend_fence_signal(q);
	}
}

static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);

	if (guc_exec_queue_allowed_to_change_state(q)) {
		MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);

		q->guc->resume_time = RESUME_PENDING;
		clear_exec_queue_suspended(q);
		set_exec_queue_pending_enable(q);
		set_exec_queue_enabled(q);
		trace_xe_exec_queue_scheduling_enable(q);

		xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
			       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
	} else {
		clear_exec_queue_suspended(q);
	}
}

#define CLEANUP		1	/* Non-zero values to catch uninitialized msg */
#define SET_SCHED_PROPS	2
#define SUSPEND		3
#define RESUME		4
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
	trace_xe_sched_msg_recv(msg);

	switch (msg->opcode) {
	case CLEANUP:
		__guc_exec_queue_process_msg_cleanup(msg);
		break;
	case SET_SCHED_PROPS:
		__guc_exec_queue_process_msg_set_sched_props(msg);
		break;
	case SUSPEND:
		__guc_exec_queue_process_msg_suspend(msg);
		break;
	case RESUME:
		__guc_exec_queue_process_msg_resume(msg);
		break;
	default:
		XE_WARN_ON("Unknown message type");
	}
}

static const struct drm_sched_backend_ops drm_sched_ops = {
	.run_job = guc_exec_queue_run_job,
	.free_job = guc_exec_queue_free_job,
	.timedout_job = guc_exec_queue_timedout_job,
};

static const struct xe_sched_backend_ops xe_sched_ops = {
	.process_msg = guc_exec_queue_process_msg,
};

static int guc_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_guc_exec_queue *ge;
	long timeout;
	int err;

	xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));

	ge = kzalloc(sizeof(*ge), GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	q->guc = ge;
	ge->q = q;
	init_waitqueue_head(&ge->suspend_wait);

	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
		  q->sched_props.job_timeout_ms;
	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
			    get_submit_wq(guc),
			    q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
			    q->name, gt_to_xe(q->gt)->drm.dev);
	if (err)
		goto err_free;

	sched = &ge->sched;
	err = xe_sched_entity_init(&ge->entity, sched);
	if (err)
		goto err_sched;

	if (xe_exec_queue_is_lr(q))
		INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);

	mutex_lock(&guc->submission_state.lock);

	err = alloc_guc_id(guc, q);
	if (err)
		goto err_entity;

	q->entity = &ge->entity;

	if (guc_read_stopped(guc))
		xe_sched_stop(sched);

	mutex_unlock(&guc->submission_state.lock);

	xe_exec_queue_assign_name(q, q->guc->id);

	trace_xe_exec_queue_create(q);

	return 0;

err_entity:
	xe_sched_entity_fini(&ge->entity);
err_sched:
	xe_sched_fini(&ge->sched);
err_free:
	kfree(ge);

	return err;
}

static void guc_exec_queue_kill(struct xe_exec_queue *q)
{
	trace_xe_exec_queue_kill(q);
	set_exec_queue_killed(q);
	xe_guc_exec_queue_trigger_cleanup(q);
}

static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
				   u32 opcode)
{
	INIT_LIST_HEAD(&msg->link);
	msg->opcode = opcode;
	msg->private_data = q;

	trace_xe_sched_msg_add(msg);
	xe_sched_add_msg(&q->guc->sched, msg);
}

#define STATIC_MSG_CLEANUP	0
#define STATIC_MSG_SUSPEND	1
#define STATIC_MSG_RESUME	2
static void guc_exec_queue_fini(struct xe_exec_queue *q)
{
	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;

	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT))
		guc_exec_queue_add_msg(q, msg, CLEANUP);
	else
		__guc_exec_queue_fini(exec_queue_to_guc(q), q);
}

static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
				       enum xe_exec_queue_priority priority)
{
	struct xe_sched_msg *msg;

	if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	q->sched_props.priority = priority;
	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

	return 0;
}

static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
{
	struct xe_sched_msg *msg;

	if (q->sched_props.timeslice_us == timeslice_us ||
	    exec_queue_killed_or_banned(q))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	q->sched_props.timeslice_us = timeslice_us;
	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

	return 0;
}

static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
					      u32 preempt_timeout_us)
{
	struct xe_sched_msg *msg;

	if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
	    exec_queue_killed_or_banned(q))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	q->sched_props.preempt_timeout_us = preempt_timeout_us;
	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

	return 0;
}

static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{
	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;

	if (exec_queue_killed_or_banned(q) || q->guc->suspend_pending)
		return -EINVAL;

	q->guc->suspend_pending = true;
	guc_exec_queue_add_msg(q, msg, SUSPEND);

	return 0;
}

static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);

	wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
		   guc_read_stopped(guc));
}

static void guc_exec_queue_resume(struct xe_exec_queue *q)
{
	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, !q->guc->suspend_pending);

	guc_exec_queue_add_msg(q, msg, RESUME);
}

static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
{
	return exec_queue_reset(q);
}

/*
 * All of these functions are an abstraction layer which other parts of XE can
 * use to trap into the GuC backend. All of these functions, aside from init,
 * really shouldn't do much other than trap into the DRM scheduler which
 * synchronizes these operations.
 */
static const struct xe_exec_queue_ops guc_exec_queue_ops = {
	.init = guc_exec_queue_init,
	.kill = guc_exec_queue_kill,
	.fini = guc_exec_queue_fini,
	.set_priority = guc_exec_queue_set_priority,
	.set_timeslice = guc_exec_queue_set_timeslice,
	.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
	.suspend = guc_exec_queue_suspend,
	.suspend_wait = guc_exec_queue_suspend_wait,
	.resume = guc_exec_queue_resume,
	.reset_status = guc_exec_queue_reset_status,
};

static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;

	/* Stop scheduling + flush any DRM scheduler operations */
	xe_sched_submission_stop(sched);

	/* Clean up lost G2H + reset engine state */
	if (exec_queue_registered(q)) {
		if ((exec_queue_banned(q) && exec_queue_destroyed(q)) ||
		    xe_exec_queue_is_lr(q))
			xe_exec_queue_put(q);
		else if (exec_queue_destroyed(q))
			__guc_exec_queue_fini(guc, q);
	}
	if (q->guc->suspend_pending) {
		set_exec_queue_suspended(q);
		suspend_fence_signal(q);
	}
	atomic_and(EXEC_QUEUE_STATE_DESTROYED | ENGINE_STATE_SUSPENDED,
		   &q->guc->state);
	q->guc->resume_time = 0;
	trace_xe_exec_queue_stop(q);

	/*
	 * Ban any engine (aside from kernel and engines used for VM ops) that
	 * has a started but not completed job, or whose job has gone through
	 * a GT reset more than twice.
	 */
	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
		struct xe_sched_job *job = xe_sched_first_pending_job(sched);

		if (job) {
			if ((xe_sched_job_started(job) &&
			     !xe_sched_job_completed(job)) ||
			    xe_sched_invalidate_job(job, 2)) {
				trace_xe_sched_job_ban(job);
				xe_sched_tdr_queue_imm(&q->guc->sched);
				set_exec_queue_banned(q);
			}
		}
	}
}
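
/*
 * GT reset flow for the GuC backend: xe_guc_submit_reset_prepare() marks
 * submission as stopped and wakes any CT waiters, xe_guc_submit_stop() walks
 * every registered exec queue to clean up lost G2H state and park its
 * scheduler, and once the GuC is back up xe_guc_submit_start() sets each LRC
 * ring head to its tail, resubmits pending jobs and restarts the schedulers.
 */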

int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
	int ret;

	/*
	 * Using an atomic here rather than submission_state.lock as this
	 * function can be called while holding the CT lock (engine reset
	 * failure). submission_state.lock needs the CT lock to resubmit jobs.
	 * Atomic is not ideal, but it works to prevent concurrent resets and
	 * to release any TDRs waiting on guc->submission_state.stopped.
	 */
	ret = atomic_fetch_or(1, &guc->submission_state.stopped);

	wake_up_all(&guc->ct.wq);

	return ret;
}

void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
	wait_event(guc->ct.wq, !guc_read_stopped(guc));
}

int xe_guc_submit_stop(struct xe_guc *guc)
{
	struct xe_exec_queue *q;
	unsigned long index;
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, guc_read_stopped(guc) == 1);

	mutex_lock(&guc->submission_state.lock);

	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		guc_exec_queue_stop(guc, q);

	mutex_unlock(&guc->submission_state.lock);

	/*
	 * No one can enter the backend at this point, aside from new engine
	 * creation which is protected by guc->submission_state.lock.
	 */

	return 0;
}

static void guc_exec_queue_start(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;

	if (!exec_queue_killed_or_banned(q)) {
		int i;

		trace_xe_exec_queue_resubmit(q);
		for (i = 0; i < q->width; ++i)
			xe_lrc_set_ring_head(q->lrc + i, q->lrc[i].ring.tail);
		xe_sched_resubmit_jobs(sched);
	}

	xe_sched_submission_start(sched);
}

int xe_guc_submit_start(struct xe_guc *guc)
{
	struct xe_exec_queue *q;
	unsigned long index;
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, guc_read_stopped(guc) == 1);

	mutex_lock(&guc->submission_state.lock);
	atomic_dec(&guc->submission_state.stopped);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		guc_exec_queue_start(q);
	mutex_unlock(&guc->submission_state.lock);

	wake_up_all(&guc->ct.wq);

	return 0;
}

static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;

	if (unlikely(guc_id >= GUC_ID_MAX)) {
		drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
		return NULL;
	}

	q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
	if (unlikely(!q)) {
		drm_err(&xe->drm, "No engine present for guc_id %u", guc_id);
		return NULL;
	}

	xe_assert(xe, guc_id >= q->guc->id);
	xe_assert(xe, guc_id < (q->guc->id + q->width));

	return q;
}

static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
{
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_CONTEXT,
		q->guc->id,
	};

	trace_xe_exec_queue_deregister(q);

	xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
}
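
/*
 * G2H handlers below: xe_guc_sched_done_handler() acknowledges a scheduling
 * enable/disable, clearing the matching pending bit and, for banned and
 * destroyed queues, kicking off the context deregister;
 * xe_guc_deregister_done_handler() finishes the teardown once the GuC has
 * dropped the context. Both run from the CT receive path, so they cannot
 * reserve additional G2H space (see disable_scheduling_deregister()).
 */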

int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id = msg[0];

	if (unlikely(len < 2)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	if (unlikely(!exec_queue_pending_enable(q) &&
		     !exec_queue_pending_disable(q))) {
		drm_err(&xe->drm, "Unexpected engine state 0x%04x",
			atomic_read(&q->guc->state));
		return -EPROTO;
	}

	trace_xe_exec_queue_scheduling_done(q);

	if (exec_queue_pending_enable(q)) {
		q->guc->resume_time = ktime_get();
		clear_exec_queue_pending_enable(q);
		smp_wmb();
		wake_up_all(&guc->ct.wq);
	} else {
		clear_exec_queue_pending_disable(q);
		if (q->guc->suspend_pending) {
			suspend_fence_signal(q);
		} else {
			if (exec_queue_banned(q)) {
				smp_wmb();
				wake_up_all(&guc->ct.wq);
			}
			deregister_exec_queue(guc, q);
		}
	}

	return 0;
}

int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id = msg[0];

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
	    exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
		drm_err(&xe->drm, "Unexpected engine state 0x%04x",
			atomic_read(&q->guc->state));
		return -EPROTO;
	}

	trace_xe_exec_queue_deregister_done(q);

	clear_exec_queue_registered(q);

	if (exec_queue_banned(q) || xe_exec_queue_is_lr(q))
		xe_exec_queue_put(q);
	else
		__guc_exec_queue_fini(guc, q);

	return 0;
}

int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id = msg[0];

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	drm_info(&xe->drm, "Engine reset: guc_id=%d", guc_id);

	/* FIXME: Do error capture, most likely async */

	trace_xe_exec_queue_reset(q);

	/*
	 * A banned engine is a NOP at this point (it came from
	 * guc_exec_queue_timedout_job). Otherwise, kick the DRM scheduler to
	 * cancel the jobs by setting the job's timeout to the minimum value,
	 * which kicks guc_exec_queue_timedout_job.
	 */
	set_exec_queue_reset(q);
	if (!exec_queue_banned(q))
		xe_guc_exec_queue_trigger_cleanup(q);

	return 0;
}

int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
					       u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id = msg[0];

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	drm_dbg(&xe->drm, "Engine memory cat error: guc_id=%d", guc_id);
	trace_xe_exec_queue_memory_cat_error(q);

	/* Treat the same as engine reset */
	set_exec_queue_reset(q);
	if (!exec_queue_banned(q))
		xe_guc_exec_queue_trigger_cleanup(q);

	return 0;
}

int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	u8 guc_class, instance;
	u32 reason;

	if (unlikely(len != 3)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_class = msg[0];
	instance = msg[1];
	reason = msg[2];

	/* Unexpected failure of a hardware feature, log an actual error */
	drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
		guc_class, instance, reason);

	xe_gt_reset_async(guc_to_gt(guc));

	return 0;
}

static void
guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
				   struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
	int i;

	snapshot->guc.wqi_head = q->guc->wqi_head;
	snapshot->guc.wqi_tail = q->guc->wqi_tail;
	snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
	snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
	snapshot->parallel.wq_desc.status = parallel_read(xe, map,
							  wq_desc.status);

	if (snapshot->parallel.wq_desc.head !=
	    snapshot->parallel.wq_desc.tail) {
		for (i = snapshot->parallel.wq_desc.head;
		     i != snapshot->parallel.wq_desc.tail;
		     i = (i + sizeof(u32)) % WQ_SIZE)
			snapshot->parallel.wq[i / sizeof(u32)] =
				parallel_read(xe, map, wq[i / sizeof(u32)]);
	}
}

static void
guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{
	int i;

	drm_printf(p, "\tWQ head: %u (internal), %d (memory)\n",
		   snapshot->guc.wqi_head, snapshot->parallel.wq_desc.head);
	drm_printf(p, "\tWQ tail: %u (internal), %d (memory)\n",
		   snapshot->guc.wqi_tail, snapshot->parallel.wq_desc.tail);
	drm_printf(p, "\tWQ status: %u\n", snapshot->parallel.wq_desc.status);

	if (snapshot->parallel.wq_desc.head !=
	    snapshot->parallel.wq_desc.tail) {
		for (i = snapshot->parallel.wq_desc.head;
		     i != snapshot->parallel.wq_desc.tail;
		     i = (i + sizeof(u32)) % WQ_SIZE)
			drm_printf(p, "\tWQ[%zu]: 0x%08x\n", i / sizeof(u32),
				   snapshot->parallel.wq[i / sizeof(u32)]);
	}
}

/**
 * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
 * @job: faulty Xe scheduled job.
 *
 * This can be printed out in a later stage like during dev_coredump
 * analysis.
 *
 * Returns: a GuC Submit Engine snapshot object that must be freed by the
 * caller, using `xe_guc_exec_queue_snapshot_free`.
 */
1794 xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
1796 struct xe_exec_queue *q = job->q;
1797 struct xe_gpu_scheduler *sched = &q->guc->sched;
1798 struct xe_guc_submit_exec_queue_snapshot *snapshot;
1801 snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
1806 snapshot->guc.id = q->guc->id;
1807 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
1808 snapshot->class = q->class;
1809 snapshot->logical_mask = q->logical_mask;
1810 snapshot->width = q->width;
1811 snapshot->refcount = kref_read(&q->refcount);
1812 snapshot->sched_timeout = sched->base.timeout;
1813 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
1814 snapshot->sched_props.preempt_timeout_us =
1815 q->sched_props.preempt_timeout_us;
1817 snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
1820 if (snapshot->lrc) {
1821 for (i = 0; i < q->width; ++i) {
1822 struct xe_lrc *lrc = q->lrc + i;
1824 snapshot->lrc[i].context_desc =
1825 lower_32_bits(xe_lrc_ggtt_addr(lrc));
1826 snapshot->lrc[i].head = xe_lrc_ring_head(lrc);
1827 snapshot->lrc[i].tail.internal = lrc->ring.tail;
1828 snapshot->lrc[i].tail.memory =
1829 xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
1830 snapshot->lrc[i].start_seqno = xe_lrc_start_seqno(lrc);
1831 snapshot->lrc[i].seqno = xe_lrc_seqno(lrc);
1835 snapshot->schedule_state = atomic_read(&q->guc->state);
1836 snapshot->exec_queue_flags = q->flags;
1838 snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
1839 if (snapshot->parallel_execution)
1840 guc_exec_queue_wq_snapshot_capture(q, snapshot);
1842 spin_lock(&sched->base.job_list_lock);
1843 snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
1844 snapshot->pending_list = kmalloc_array(snapshot->pending_list_size,
1845 sizeof(struct pending_list_snapshot),
1848 if (snapshot->pending_list) {
1849 struct xe_sched_job *job_iter;
1852 list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
1853 snapshot->pending_list[i].seqno =
1854 xe_sched_job_seqno(job_iter);
1855 snapshot->pending_list[i].fence =
1856 dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
1857 snapshot->pending_list[i].finished =
1858 dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
1864 spin_unlock(&sched->base.job_list_lock);
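
/*
 * Typical use mirrors guc_exec_queue_print() below: capture a snapshot (the
 * allocations use GFP_ATOMIC, so this is usable from non-sleeping contexts),
 * print it with xe_guc_exec_queue_snapshot_print() and release it with
 * xe_guc_exec_queue_snapshot_free().
 */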

/**
 * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC Submit Engine snapshot object.
 */
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{
	int i;

	if (!snapshot)
		return;

	drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
	drm_printf(p, "\tName: %s\n", snapshot->name);
	drm_printf(p, "\tClass: %d\n", snapshot->class);
	drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
	drm_printf(p, "\tWidth: %d\n", snapshot->width);
	drm_printf(p, "\tRef: %d\n", snapshot->refcount);
	drm_printf(p, "\tTimeout: %ld (ms)\n", snapshot->sched_timeout);
	drm_printf(p, "\tTimeslice: %u (us)\n",
		   snapshot->sched_props.timeslice_us);
	drm_printf(p, "\tPreempt timeout: %u (us)\n",
		   snapshot->sched_props.preempt_timeout_us);

	for (i = 0; snapshot->lrc && i < snapshot->width; ++i) {
		drm_printf(p, "\tHW Context Desc: 0x%08x\n",
			   snapshot->lrc[i].context_desc);
		drm_printf(p, "\tLRC Head: (memory) %u\n",
			   snapshot->lrc[i].head);
		drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
			   snapshot->lrc[i].tail.internal,
			   snapshot->lrc[i].tail.memory);
		drm_printf(p, "\tStart seqno: (memory) %d\n",
			   snapshot->lrc[i].start_seqno);
		drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->lrc[i].seqno);
	}

	drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
	drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);

	if (snapshot->parallel_execution)
		guc_exec_queue_wq_snapshot_print(snapshot, p);

	for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
	     i++)
		drm_printf(p, "\tJob: seqno=%d, fence=%d, finished=%d\n",
			   snapshot->pending_list[i].seqno,
			   snapshot->pending_list[i].fence,
			   snapshot->pending_list[i].finished);
}

/**
 * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
 * snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	if (!snapshot)
		return;

	kfree(snapshot->lrc);
	kfree(snapshot->pending_list);
	kfree(snapshot);
}

static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{
	struct xe_guc_submit_exec_queue_snapshot *snapshot;
	struct xe_gpu_scheduler *sched = &q->guc->sched;
	struct xe_sched_job *job;
	bool found = false;

	spin_lock(&sched->base.job_list_lock);
	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
		found = true;
		xe_sched_job_get(job);
		break;
	}
	spin_unlock(&sched->base.job_list_lock);

	if (!found)
		return;

	snapshot = xe_guc_exec_queue_snapshot_capture(job);
	xe_guc_exec_queue_snapshot_print(snapshot, p);
	xe_guc_exec_queue_snapshot_free(snapshot);

	xe_sched_job_put(job);
}

/**
 * xe_guc_submit_print - GuC Submit Print.
 * @guc: GuC.
 * @p: drm_printer where it will be printed out.
 *
 * This function captures and prints snapshots of **all** GuC Engines.
 */
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_exec_queue *q;
	unsigned long index;

	if (!xe_device_uc_enabled(guc_to_xe(guc)))
		return;

	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		guc_exec_queue_print(q, p);
	mutex_unlock(&guc->submission_state.lock);
}