// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

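/*
 * For example (assuming the modparam bit layout, where bit 0 requests GuC
 * submission and bit 1 requests HuC loading), booting with i915.enable_guc=2
 * would load the firmware for HuC authentication only, while enable_guc=3
 * would also enable GuC submission.
 */
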
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

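/*
 * Cache the scratch registers that back the MMIO send channel, together with
 * the forcewake domains that must be held while accessing them (gen11 moved
 * the GuC scratch registers to a new range).
 */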
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

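/*
 * GuC-to-host interrupt management. Gen9 routes GuC events through the GT PM
 * interrupt block, while gen11+ has dedicated GuC "SG" interrupt registers;
 * the matching helpers are installed as vfuncs by intel_guc_init_early().
 */
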
static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
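	/*
	 * Wait for any in-flight GuC interrupt handler to complete before
	 * clearing the now-stale IIR bits below.
	 */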
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

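/**
 * intel_guc_init_early() - software-only initialisation of the GuC state
 * @guc: intel_guc structure
 *
 * Sets up the sub-components (fw, ct, log, submission), the locks and the
 * gen-specific notify/interrupt vfuncs. No hardware access happens at this
 * point.
 */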
void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}

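/*
 * The guc_ctl_*_flags() helpers below assemble the individual dwords of the
 * boot-time parameter block; guc_init_params() gathers them and
 * intel_guc_write_params() writes them to the hardware.
 */
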
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			 (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the scratch registers before starting
 * the firmware transfer. These parameters are read by the firmware on
 * startup and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

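/**
 * intel_guc_init() - allocate and initialise everything the GuC needs
 * @guc: intel_guc structure
 *
 * Creates the firmware, log, ADS, CT and (when supported) submission state,
 * then freezes the parameter block and asks the GGTT code to start notifying
 * the GuC of GGTT updates. Unwinds in reverse order on failure.
 *
 * Return: 0 on success, negative error code on failure.
 */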
int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

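/*
 * Placeholder send/handler vfuncs, installed by intel_guc_init_early();
 * hitting either one means a caller tried to talk to the GuC before a real
 * transport (MMIO or CT) was enabled.
 */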
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface, using the
 * scratch registers cached by intel_guc_init_send_regs(). Once the CT channel
 * is available, MMIO sends are expected only during init/fini.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

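/*
 * Usage sketch (hypothetical values, loosely following how the CT code
 * registers its buffers over MMIO before the CT channel itself is up):
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
 *		desc_ggtt_addr,
 *		type,
 *	};
 *	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 */
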
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

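/**
 * intel_guc_sample_forcewake() - tell the GuC which domains to sample
 * @guc: intel_guc structure
 *
 * Reports the forcewake domains (render/media) the firmware should consider;
 * platforms without RC6, or affected by WaRsDisableCoarsePowerGating, report
 * none.
 *
 * Return: 0 on success, negative error code on failure.
 */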
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * intel_guc_send() INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function
 * is invoked by intel_huc_auth().
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to suspend the GuC.
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc: intel_guc structure
 * @engine: engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	/* XXX: to be implemented with submission interface rework */

	return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to resume the GuC but we do need to enable the
	 * GuC communication on resume (above).
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GuC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

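/*
 * Usage sketch (hypothetical caller): objects the GuC must access are
 * allocated through this helper so they stay pinned above ggtt_pin_bias for
 * their whole lifetime:
 *
 *	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */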