// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"

/**
 * DOC: GT Multicast/Replicated (MCR) Register Support
 *
 * Some GT registers are designed as "multicast" or "replicated" registers:
 * multiple instances of the same register share a single MMIO offset. MCR
 * registers are generally used when the hardware needs to potentially track
 * independent values of a register per hardware unit (e.g., per-subslice,
 * per-L3bank, etc.). The specific types of replication that exist vary
 * per-platform.
 *
 * MMIO accesses to MCR registers are controlled according to the settings
 * programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
 * registers can be done in either multicast (i.e., a single write updates all
 * instances of the register to the same value) or unicast (a write updates only
 * one specific instance) form. Reads of MCR registers always operate in a
 * unicast manner regardless of how the multicast/unicast bit is set in
 * MCR_SELECTOR. Selection of a specific MCR instance for unicast operations is
 * referred to as "steering."
 *
 * If MCR register operations are steered toward a hardware unit that is
 * fused off or currently powered down due to power gating, the MMIO operation
 * is "terminated" by the hardware. Terminated read operations will return a
 * value of zero and terminated unicast write operations will be silently
 * ignored.
 */
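
/*
 * Example (illustrative sketch only; EXAMPLE_MCR_REG and the steering IDs
 * below are placeholders, not real definitions). Updating every instance of
 * a replicated register versus reading one specific instance:
 *
 *	intel_gt_mcr_multicast_write(gt, EXAMPLE_MCR_REG, value);
 *	val = intel_gt_mcr_read(gt, EXAMPLE_MCR_REG, 1, 2);
 *
 * If the unit at group 1, instance 2 is fused off or power-gated, the read
 * above is "terminated" and returns zero.
 */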

#define HAS_MSLICE_STEERING(i915)	(INTEL_INFO(i915)->has_mslice_steering)

/*
 * Names of the explicit steering types; these must stay in sync with the
 * intel_steering_type enum (checked by the BUILD_BUG_ON() in
 * report_steering_type() below).
 */
static const char * const intel_steering_types[] = {
	"L3BANK",
	"MSLICE",
	"LNCF",
	"GAM",
	"DSS",
	"OADDRM",
	"INSTANCE 0",
};

static const struct intel_mmio_range icl_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

/*
 * Although the bspec lists more "MSLICE" ranges than shown here, some of those
 * are of a "GAM" subclass that has special rules. Thus we use a separate
 * GAM table farther down for those.
 */
static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
	{ 0x00DD00, 0x00DDFF },
	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
	{},
};

static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
	{ 0x004000, 0x004AFF },
	{ 0x00C800, 0x00CFFF },
	{},
};

static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D800, 0x00D8FF },
	{},
};

static const struct intel_mmio_range dg2_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D880, 0x00D8FF },
	{},
};

/*
 * We have several types of MCR registers on PVC where steering to (0,0)
 * will always provide us with a non-terminated value. We'll stick them
 * all in the same table for simplicity.
 */
static const struct intel_mmio_range pvc_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF },		/* HALF-BSLICE */
	{ 0x008800, 0x00887F },		/* CC */
	{ 0x008A80, 0x008AFF },		/* TILEPSMI */
	{ 0x00B000, 0x00B0FF },		/* HALF-BSLICE */
	{ 0x00B100, 0x00B3FF },		/* L3BANK */
	{ 0x00C800, 0x00CFFF },		/* HALF-BSLICE */
	{ 0x00D800, 0x00D8FF },		/* HALF-BSLICE */
	{ 0x00DD00, 0x00DDFF },		/* BSLICE */
	{ 0x00E900, 0x00E9FF },		/* HALF-BSLICE */
	{ 0x00EC00, 0x00EEFF },		/* HALF-BSLICE */
	{ 0x00F000, 0x00FFFF },		/* HALF-BSLICE */
	{ 0x024180, 0x0241FF },		/* HALF-BSLICE */
	{},
};

static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
	{ 0x000B00, 0x000BFF },		/* SQIDI */
	{ 0x001000, 0x001FFF },		/* SQIDI */
	{ 0x004000, 0x0048FF },		/* GAM */
	{ 0x008700, 0x0087FF },		/* SQIDI */
	{ 0x00B000, 0x00B0FF },		/* NODE */
	{ 0x00C800, 0x00CFFF },		/* GAM */
	{ 0x00D880, 0x00D8FF },		/* NODE */
	{ 0x00DD00, 0x00DDFF },		/* OAAL2 */
	{},
};

static const struct intel_mmio_range xelpg_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

/* DSS steering is used for SLICE ranges as well */
static const struct intel_mmio_range xelpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },		/* SLICE */
	{ 0x005500, 0x007FFF },		/* SLICE */
	{ 0x008140, 0x00815F },		/* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },		/* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },		/* DSS */
	{ 0x00D800, 0x00D87F },		/* SLICE */
	{ 0x00DC00, 0x00DCFF },		/* SLICE */
	{ 0x00DE80, 0x00E8FF },		/* DSS (0xE000-0xE0FF reserved) */
	{},
};

static const struct intel_mmio_range xelpmp_oaddrm_steering_table[] = {
	{ 0x393200, 0x39323F },
	{ 0x393400, 0x3934FF },
	{},
};

void intel_gt_mcr_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned long fuse;
	int i;

	spin_lock_init(&gt->mcr_lock);

	/*
	 * An mslice is unavailable only if both the meml3 for the slice is
	 * disabled *and* all of the DSS in the slice (quadrant) are disabled.
	 */
	if (HAS_MSLICE_STEERING(i915)) {
		gt->info.mslice_mask =
			intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
							  GEN_DSS_PER_MSLICE);
		gt->info.mslice_mask |=
			(intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			 GEN12_MEML3_EN_MASK);

		if (!gt->info.mslice_mask) /* should be impossible! */
			gt_warn(gt, "mslice mask all zero!\n");
	}

	if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
		gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
		    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
			fuse = REG_FIELD_GET(MTL_GT_L3_EXC_MASK,
					     intel_uncore_read(gt->uncore,
							       MTL_GT_ACTIVITY_FACTOR));
		else
			fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
					     intel_uncore_read(gt->uncore, XEHP_FUSE4));

		/*
		 * Despite the register field being named "exclude mask" the
		 * bits actually represent enabled banks (two banks per bit).
		 */
		for_each_set_bit(i, &fuse, 3)
			gt->info.l3bank_mask |= 0x3 << 2 * i;
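
		/*
		 * Worked example (illustrative only): a fuse value of 0b101
		 * has bits 0 and 2 set, so the loop ORs in 0x3 << 0 and
		 * 0x3 << 4, yielding l3bank_mask == 0b110011 (banks 0, 1,
		 * 4 and 5 present).
		 */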

		gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
		gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
		gt->steering_table[DSS] = xelpg_dss_steering_table;
	} else if (IS_PONTEVECCHIO(i915)) {
		gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
	} else if (IS_DG2(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = dg2_lncf_steering_table;
		/*
		 * No need to hook up the GAM table since it has a dedicated
		 * steering control register on DG2 and can use implicit
		 * steering.
		 */
	} else if (IS_XEHPSDV(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
		gt->steering_table[GAM] = xehpsdv_gam_steering_table;
	} else if (GRAPHICS_VER(i915) >= 11 &&
		   GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
		gt->steering_table[L3BANK] = icl_l3bank_steering_table;
		gt->info.l3bank_mask =
			~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;
		if (!gt->info.l3bank_mask) /* should be impossible! */
			gt_warn(gt, "L3 bank mask is all zero!\n");
	} else if (GRAPHICS_VER(i915) >= 11) {
		/*
		 * We expect all modern platforms to have at least some
		 * type of steering that needs to be initialized.
		 */
		MISSING_CASE(INTEL_INFO(i915)->platform);
	}
}

/*
 * Although the rest of the driver should use MCR-specific functions to
 * read/write MCR registers, we still use the regular intel_uncore_* functions
 * internally to implement those, so we need a way for the functions in this
 * file to "cast" an i915_mcr_reg_t into an i915_reg_t.
 */
static i915_reg_t mcr_reg_cast(const i915_mcr_reg_t mcr)
{
	i915_reg_t r = { .reg = mcr.reg };

	return r;
}

/*
 * rw_with_mcr_steering_fw - Access a register with specific MCR steering
 * @gt: GT to read register from
 * @reg: register being accessed
 * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
 * @group: group number (documented as "sliceid" on older platforms)
 * @instance: instance number (documented as "subsliceid" on older platforms)
 * @value: register value to be written (ignored for read)
 *
 * Context: The caller must hold the MCR lock
 * Return: 0 for write access, or the register value for read access.
 *
 * Caller needs to make sure the relevant forcewake wells are up.
 */
static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
				   i915_mcr_reg_t reg, u8 rw_flag,
				   int group, int instance, u32 value)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;

	lockdep_assert_held(&gt->mcr_lock);

	if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70)) {
		/*
		 * Always leave the hardware in multicast mode when doing reads
		 * (see comment about Wa_22013088509 below) and only change it
		 * to unicast mode when doing writes of a specific instance.
		 *
		 * No need to save old steering reg value.
		 */
		intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR,
				      REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
				      REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance) |
				      (rw_flag == FW_REG_READ ? GEN11_MCR_MULTICAST : 0));
	} else if (GRAPHICS_VER(uncore->i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);

		/*
		 * Wa_22013088509
		 *
		 * The setting of the multicast/unicast bit usually wouldn't
		 * matter for read operations (which always return the value
		 * from a single register instance regardless of how that bit
		 * is set), but some platforms have a workaround requiring us
		 * to remain in multicast mode for reads. There's no real
		 * downside to this, so we'll just go ahead and do so on all
		 * platforms; we'll only clear the multicast bit from the mask
		 * when explicitly doing a write operation.
		 */
		if (rw_flag == FW_REG_WRITE)
			mcr_mask |= GEN11_MCR_MULTICAST;

		mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
		old_mcr = mcr;

		mcr &= ~mcr_mask;
		mcr |= mcr_ss;
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);

		mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
		old_mcr = mcr;

		mcr &= ~mcr_mask;
		mcr |= mcr_ss;
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
	}

	if (rw_flag == FW_REG_READ)
		val = intel_uncore_read_fw(uncore, mcr_reg_cast(reg));
	else
		intel_uncore_write_fw(uncore, mcr_reg_cast(reg), value);

	/*
	 * For pre-MTL platforms, we need to restore the old value of the
	 * steering control register to ensure that implicit steering continues
	 * to behave as expected. For MTL and beyond, we need only reinstate
	 * the 'multicast' bit (and only if we did a write that cleared it).
	 */
	if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70) && rw_flag == FW_REG_WRITE)
		intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
	else if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 70))
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, old_mcr);

	return val;
}

static u32 rw_with_mcr_steering(struct intel_gt *gt,
				i915_mcr_reg_t reg, u8 rw_flag,
				int group, int instance,
				u32 value)
{
	struct intel_uncore *uncore = gt->uncore;
	enum forcewake_domains fw_domains;
	unsigned long flags;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, mcr_reg_cast(reg),
						    rw_flag);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = rw_with_mcr_steering_fw(gt, reg, rw_flag, group, instance, value);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);

	return val;
}

/**
 * intel_gt_mcr_lock - Acquire MCR steering lock
 * @gt: GT structure
 * @flags: storage to save IRQ flags to
 *
 * Performs locking to protect the steering for the duration of an MCR
 * operation. On MTL and beyond, a hardware lock will also be taken to
 * serialize access not only for the driver, but also for external hardware and
 * firmware agents.
 *
 * Context: Takes gt->mcr_lock. uncore->lock should *not* be held when this
 *          function is called, although it may be acquired after this
 *          function call.
 */
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
	__acquires(&gt->mcr_lock)
{
	unsigned long __flags;
	int err = 0;

	lockdep_assert_not_held(&gt->uncore->lock);

	/*
	 * Starting with MTL, we need to coordinate not only with other
	 * driver threads, but also with hardware/firmware agents. A dedicated
	 * locking register is used.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
		/*
		 * The steering control and semaphore registers are inside an
		 * "always on" power domain with respect to RC6. However there
		 * are some issues if higher-level platform sleep states are
		 * entering/exiting at the same time these registers are
		 * accessed. Grabbing GT forcewake and holding it over the
		 * entire lock/steer/unlock cycle ensures that those sleep
		 * states have been fully exited before we access these
		 * registers. This wakeref will be released in the unlock
		 * routine.
		 *
		 * This is expected to become a formally documented/numbered
		 * workaround soon.
		 */
		intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);

		err = wait_for(intel_uncore_read_fw(gt->uncore,
						    MTL_STEER_SEMAPHORE) == 0x1, 100);
	}

	/*
	 * Even on platforms with a hardware lock, we'll continue to grab
	 * a software spinlock too for lockdep purposes. If the hardware lock
	 * was already acquired, there should never be contention on the
	 * software lock.
	 */
	spin_lock_irqsave(&gt->mcr_lock, __flags);

	*flags = __flags;

	/*
	 * In theory we should never fail to acquire the HW semaphore; this
	 * would indicate some hardware/firmware is misbehaving and not
	 * releasing it properly.
	 */
	if (err == -ETIMEDOUT) {
		gt_err_ratelimited(gt, "hardware MCR steering semaphore timed out");
		add_taint_for_CI(gt->i915, TAINT_WARN);	/* CI is now unreliable */
	}
}

/**
 * intel_gt_mcr_unlock - Release MCR steering lock
 * @gt: GT structure
 * @flags: IRQ flags to restore
 *
 * Releases the lock acquired by intel_gt_mcr_lock().
 *
 * Context: Releases gt->mcr_lock
 */
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
	__releases(&gt->mcr_lock)
{
	spin_unlock_irqrestore(&gt->mcr_lock, flags);

	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
		intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);

		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT);
	}
}
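
/*
 * Example (illustrative sketch only; EXAMPLE_MCR_REG is a placeholder, not a
 * real register definition): a caller that must issue several steered
 * accesses atomically can hold the MCR lock across the sequence and use the
 * _fw variants, provided the needed forcewake wells are already held:
 *
 *	unsigned long flags;
 *
 *	intel_gt_mcr_lock(gt, &flags);
 *	intel_gt_mcr_multicast_write_fw(gt, EXAMPLE_MCR_REG, val);
 *	intel_gt_mcr_unlock(gt, flags);
 */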

/**
 * intel_gt_mcr_lock_sanitize - Sanitize MCR steering lock
 * @gt: GT structure
 *
 * This will be used to sanitize the initial status of the hardware lock
 * during driver load and resume since there won't be any concurrent access
 * from other agents at those times, but it's possible that boot firmware
 * may have left the lock in a bad state.
 */
void intel_gt_mcr_lock_sanitize(struct intel_gt *gt)
{
	/*
	 * This gets called at load/resume time, so we shouldn't be
	 * racing with other driver threads grabbing the mcr lock.
	 */
	lockdep_assert_not_held(&gt->mcr_lock);

	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
}

/**
 * intel_gt_mcr_read - read a specific instance of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to read
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Context: Takes and releases gt->mcr_lock
 *
 * Returns the value read from an MCR register after steering toward a specific
 * group/instance.
 */
u32 intel_gt_mcr_read(struct intel_gt *gt,
		      i915_mcr_reg_t reg,
		      int group, int instance)
{
	return rw_with_mcr_steering(gt, reg, FW_REG_READ, group, instance, 0);
}

/**
 * intel_gt_mcr_unicast_write - write a specific instance of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Write an MCR register in unicast mode after steering toward a specific
 * group/instance.
 *
 * Context: Calls a function that takes and releases gt->mcr_lock
 */
void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value,
				int group, int instance)
{
	rw_with_mcr_steering(gt, reg, FW_REG_WRITE, group, instance, value);
}

/**
 * intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.
 *
 * Context: Takes and releases gt->mcr_lock
 */
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
				  i915_mcr_reg_t reg, u32 value)
{
	unsigned long flags;

	intel_gt_mcr_lock(gt, &flags);

	/*
	 * Ensure we have multicast behavior, just in case some non-i915 agent
	 * left the hardware in unicast mode.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);

	intel_uncore_write(gt->uncore, mcr_reg_cast(reg), value);

	intel_gt_mcr_unlock(gt, flags);
}

/**
 * intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances. This
 * function assumes the caller is already holding any necessary forcewake
 * domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
 * be obtained automatically.
 *
 * Context: The caller must hold gt->mcr_lock.
 */
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value)
{
	lockdep_assert_held(&gt->mcr_lock);

	/*
	 * Ensure we have multicast behavior, just in case some non-i915 agent
	 * left the hardware in unicast mode.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);

	intel_uncore_write_fw(gt->uncore, mcr_reg_cast(reg), value);
}

/**
 * intel_gt_mcr_multicast_rmw - Performs a multicast RMW operation
 * @gt: GT structure
 * @reg: the MCR register to read and write
 * @clear: bits to clear during RMW
 * @set: bits to set during RMW
 *
 * Performs a read-modify-write on an MCR register in a multicast manner.
 * This operation only makes sense on MCR registers where all instances are
 * expected to have the same value. The read will target any non-terminated
 * instance and the write will be applied to all instances.
 *
 * Any necessary forcewake domains are obtained automatically by the
 * functions this routine calls.
 *
 * Context: Calls functions that take and release gt->mcr_lock
 *
 * Returns the old (unmodified) value read.
 */
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
			       u32 clear, u32 set)
{
	u32 val = intel_gt_mcr_read_any(gt, reg);

	intel_gt_mcr_multicast_write(gt, reg, (val & ~clear) | set);

	return val;
}
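
/*
 * Example (illustrative sketch only; EXAMPLE_MCR_REG and EXAMPLE_DIS_BIT are
 * placeholder names): set one bit in every instance of a replicated register
 * while preserving the other bits:
 *
 *	old = intel_gt_mcr_multicast_rmw(gt, EXAMPLE_MCR_REG, 0, EXAMPLE_DIS_BIT);
 *
 * This reads one non-terminated instance and then broadcasts
 * (old | EXAMPLE_DIS_BIT) to all instances.
 */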

/*
 * reg_needs_read_steering - determine whether a register read requires
 *                           explicit steering
 * @gt: GT structure
 * @reg: the register to check steering requirements for
 * @type: type of multicast steering to check
 *
 * Determines whether @reg needs explicit steering of a specific type for
 * reads.
 *
 * Returns false if @reg does not belong to a register range of the given
 * steering type, or if the default (subslice-based) steering IDs are suitable
 * for @type steering too.
 */
static bool reg_needs_read_steering(struct intel_gt *gt,
				    i915_mcr_reg_t reg,
				    enum intel_steering_type type)
{
	u32 offset = i915_mmio_reg_offset(reg);
	const struct intel_mmio_range *entry;

	if (likely(!gt->steering_table[type]))
		return false;

	if (IS_GSI_REG(offset))
		offset += gt->uncore->gsi_offset;

	for (entry = gt->steering_table[type]; entry->end; entry++) {
		if (offset >= entry->start && offset <= entry->end)
			return true;
	}

	return false;
}

/*
 * get_nonterminated_steering - determines valid IDs for a class of MCR steering
 * @gt: GT structure
 * @type: multicast register type
 * @group: Group ID returned
 * @instance: Instance ID returned
 *
 * Determines group and instance values that will steer reads of the specified
 * MCR class to a non-terminated instance.
 */
static void get_nonterminated_steering(struct intel_gt *gt,
				       enum intel_steering_type type,
				       u8 *group, u8 *instance)
{
	u32 dss;

	switch (type) {
	case L3BANK:
		*group = 0;		/* unused */
		*instance = __ffs(gt->info.l3bank_mask);
		break;
	case MSLICE:
		GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
		*group = __ffs(gt->info.mslice_mask);
		*instance = 0;		/* unused */
		break;
	case LNCF:
		/*
		 * An LNCF is always present if its mslice is present, so we
		 * can safely just steer to LNCF 0 in all cases.
		 */
		GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
		*group = __ffs(gt->info.mslice_mask) << 1;
		*instance = 0;		/* unused */
		break;
	case GAM:
		*group = IS_DG2(gt->i915) ? 1 : 0;
		*instance = 0;
		break;
	case DSS:
		dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
		*group = dss / GEN_DSS_PER_GSLICE;
		*instance = dss % GEN_DSS_PER_GSLICE;
		break;
	case INSTANCE0:
		/*
		 * There are a lot of MCR types for which instance (0, 0)
		 * will always provide a non-terminated value.
		 */
		*group = 0;
		*instance = 0;
		break;
	case OADDRM:
		if ((VDBOX_MASK(gt) | VEBOX_MASK(gt) | gt->info.sfc_mask) & BIT(0))
			*group = 0;
		else
			*group = 1;
		*instance = 0;
		break;
	default:
		MISSING_CASE(type);
		*group = 0;
		*instance = 0;
	}
}

/**
 * intel_gt_mcr_get_nonterminated_steering - find group/instance values that
 *    will steer a register to a non-terminated instance
 * @gt: GT structure
 * @reg: register for which the steering is required
 * @group: return variable for group steering
 * @instance: return variable for instance steering
 *
 * This function returns a group/instance pair that is guaranteed to work for
 * read steering of the given register. Note that a value will be returned even
 * if the register is not replicated and therefore does not actually require
 * steering.
 */
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
					     i915_mcr_reg_t reg,
					     u8 *group, u8 *instance)
{
	int type;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, group, instance);
			return;
		}
	}

	*group = gt->default_steering.groupid;
	*instance = gt->default_steering.instanceid;
}

/**
 * intel_gt_mcr_read_any_fw - reads one instance of an MCR register
 * @gt: GT structure
 * @reg: register to read
 *
 * Reads a GT MCR register. The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 * This function assumes the caller is already holding any necessary forcewake
 * domains; use intel_gt_mcr_read_any() in cases where forcewake should be
 * obtained automatically.
 *
 * Context: The caller must hold gt->mcr_lock.
 *
 * Returns the value from a non-terminated instance of @reg.
 */
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
{
	int type;
	u8 group, instance;

	lockdep_assert_held(&gt->mcr_lock);

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, &group, &instance);
			return rw_with_mcr_steering_fw(gt, reg,
						       FW_REG_READ,
						       group, instance, 0);
		}
	}

	return intel_uncore_read_fw(gt->uncore, mcr_reg_cast(reg));
}

/**
 * intel_gt_mcr_read_any - reads one instance of an MCR register
 * @gt: GT structure
 * @reg: register to read
 *
 * Reads a GT MCR register. The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 *
 * Context: Calls a function that takes and releases gt->mcr_lock.
 *
 * Returns the value from a non-terminated instance of @reg.
 */
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
{
	int type;
	u8 group, instance;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, &group, &instance);
			return rw_with_mcr_steering(gt, reg,
						    FW_REG_READ,
						    group, instance, 0);
		}
	}

	return intel_uncore_read(gt->uncore, mcr_reg_cast(reg));
}

static void report_steering_type(struct drm_printer *p,
				 struct intel_gt *gt,
				 enum intel_steering_type type,
				 bool dump_table)
{
	const struct intel_mmio_range *entry;
	u8 group, instance;

	BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);

	if (!gt->steering_table[type]) {
		drm_printf(p, "%s steering: uses default steering\n",
			   intel_steering_types[type]);
		return;
	}

	get_nonterminated_steering(gt, type, &group, &instance);
	drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
		   intel_steering_types[type], group, instance);

	if (!dump_table)
		return;

	for (entry = gt->steering_table[type]; entry->end; entry++)
		drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
}

void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
				  bool dump_table)
{
	/*
	 * Starting with MTL we no longer have default steering;
	 * all ranges are explicitly steered.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70))
		drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
			   gt->default_steering.groupid,
			   gt->default_steering.instanceid);

	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
		for (int i = 0; i < NUM_STEERING_TYPES; i++)
			if (gt->steering_table[i])
				report_steering_type(p, gt, i, dump_table);
	} else if (IS_PONTEVECCHIO(gt->i915)) {
		report_steering_type(p, gt, INSTANCE0, dump_table);
	} else if (HAS_MSLICE_STEERING(gt->i915)) {
		report_steering_type(p, gt, MSLICE, dump_table);
		report_steering_type(p, gt, LNCF, dump_table);
	}
}

/**
 * intel_gt_mcr_get_ss_steering - returns the group/instance steering for a SS
 * @gt: GT structure
 * @dss: DSS ID to obtain steering for
 * @group: pointer to storage for steering group ID
 * @instance: pointer to storage for steering instance ID
 *
 * Returns the steering IDs (via the @group and @instance parameters) that
 * correspond to a specific subslice/DSS ID.
 */
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
				  unsigned int *group, unsigned int *instance)
{
	if (IS_PONTEVECCHIO(gt->i915)) {
		*group = dss / GEN_DSS_PER_CSLICE;
		*instance = dss % GEN_DSS_PER_CSLICE;
	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
		*group = dss / GEN_DSS_PER_GSLICE;
		*instance = dss % GEN_DSS_PER_GSLICE;
	} else {
		*group = dss / GEN_MAX_SS_PER_HSW_SLICE;
		*instance = dss % GEN_MAX_SS_PER_HSW_SLICE;
	}
}
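
/*
 * Worked example (illustrative only): on a Xe_HP-based platform where
 * GEN_DSS_PER_GSLICE == 4, DSS ID 6 yields *group == 6 / 4 == 1 and
 * *instance == 6 % 4 == 2.
 */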

/**
 * intel_gt_mcr_wait_for_reg - wait until MCR register matches expected state
 * @gt: GT structure
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: value to wait for
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_gt_mcr_read_any_fw(gt, reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * This function is basically an MCR-friendly version of
 * __intel_wait_for_register_fw(). Generally this function will only be used
 * on GAM registers which are a bit special --- although they're MCR registers,
 * reads (e.g., waiting for status updates) are always directed to the primary
 * instance.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits.
 *
 * Context: Calls a function that takes and releases gt->mcr_lock
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
			      i915_mcr_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms)
{
	int ret;

	lockdep_assert_not_held(&gt->mcr_lock);

#define done ((intel_gt_mcr_read_any(gt, reg) & mask) == value)

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	return ret;
#undef done
}
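
/*
 * Example (illustrative sketch only; EXAMPLE_GAM_REG and EXAMPLE_DONE_BIT are
 * placeholder names): poll a GAM register until an operation completes,
 * spinning for up to 100us before falling back to a sleeping wait of up to
 * 5ms. The caller is assumed to hold the relevant forcewake wells:
 *
 *	err = intel_gt_mcr_wait_for_reg(gt, EXAMPLE_GAM_REG,
 *					EXAMPLE_DONE_BIT, EXAMPLE_DONE_BIT,
 *					100, 5);
 *	if (err == -ETIMEDOUT)
 *		gt_warn(gt, "example operation timed out\n");
 */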