/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_sideband.h"
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require a higher latency to switch to and wake up.
 */
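/*
 * Illustrative sketch, not part of the driver: how the per-state enable
 * flags described above combine into a single control mask, in the style
 * of gen6_rc6_enable() below. Whether the deeper states may be enabled is
 * a per-platform decision.
 */
#if 0
static u32 example_rc6_ctl_mask(bool rc6p, bool rc6pp)
{
	u32 mask = GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_RC6_ENABLE;

	if (rc6p)
		mask |= GEN6_RC_CTL_RC6p_ENABLE;	/* deep RC6 */
	if (rc6pp)
		mask |= GEN6_RC_CTL_RC6pp_ENABLE;	/* deepest RC6 */

	return mask;
}
#endif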
static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
{
	return container_of(rc6, struct intel_gt, rc6);
}

static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
{
	return rc6_to_gt(rc)->uncore;
}

static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
{
	return rc6_to_gt(rc)->i915;
}

static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}
static void gen11_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 2b: Program RC6 thresholds. */
	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
	set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);

	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

	set(uncore, GUC_MAX_IDLE_COUNT, 0xA);

	set(uncore, GEN6_RC_SLEEP, 0);

	set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/*
	 * 2c: Program Coarse Power Gating Policies.
	 *
	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
	 * use instead is a more conservative estimate for the maximum time
	 * it takes us to service a CS interrupt and submit a new ELSP - that
	 * is the time which the GPU is idle waiting for the CPU to select the
	 * next request to execute. If the idle hysteresis is less than that
	 * interrupt service latency, the hardware will automatically gate
	 * the power well and we will then incur the wake up cost on top of
	 * the service latency. A similar guide from plane_state is that we
	 * do not want the enable hysteresis to be less than the wakeup
	 * latency.
	 *
	 * igt/gem_exec_nop/sequential provides a rough estimate for the
	 * service latency, and puts it under 10us for Icelake, similar to
	 * Broadwell+. To be conservative, we want to factor in a context
	 * switch on top (due to ksoftirqd).
	 */
	set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
	set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);

	/* 3a: Enable RC6 */
	rc6->ctl_enable =
		GEN6_RC_CTL_HW_ENABLE |
		GEN6_RC_CTL_RC6_ENABLE |
		GEN6_RC_CTL_EI_MODE(1);

	set(uncore, GEN9_PG_ENABLE,
	    GEN9_RENDER_PG_ENABLE |
	    GEN9_MEDIA_PG_ENABLE |
	    GEN11_MEDIA_SAMPLER_PG_ENABLE);
}
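/*
 * Illustrative sketch, not part of the driver: the threshold and hysteresis
 * registers programmed above count in 1.28us hardware units, so e.g. the
 * idle hysteresis of 60 units corresponds to ~76.8us and the 25-unit
 * hysteresis used elsewhere to 32us.
 */
#if 0
static u64 example_rc6_units_to_ns(u32 units)
{
	return (u64)units * 1280; /* 1 hw unit = 1.28us = 1280ns */
}
#endif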
static void gen9_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6_mode;

	/* 2b: Program RC6 thresholds. */
	if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
		set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
		set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
	} else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
		/*
		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
		 * when CPG is enabled
		 */
		set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	} else {
		set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	}

	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

	set(uncore, GUC_MAX_IDLE_COUNT, 0xA);

	set(uncore, GEN6_RC_SLEEP, 0);

	/*
	 * 2c: Program Coarse Power Gating Policies.
	 *
	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
	 * use instead is a more conservative estimate for the maximum time
	 * it takes us to service a CS interrupt and submit a new ELSP - that
	 * is the time which the GPU is idle waiting for the CPU to select the
	 * next request to execute. If the idle hysteresis is less than that
	 * interrupt service latency, the hardware will automatically gate
	 * the power well and we will then incur the wake up cost on top of
	 * the service latency. A similar guide from plane_state is that we
	 * do not want the enable hysteresis to be less than the wakeup
	 * latency.
	 *
	 * igt/gem_exec_nop/sequential provides a rough estimate for the
	 * service latency, and puts it around 10us for Broadwell (and other
	 * big core) and around 40us for Broxton (and other low power cores).
	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
	 * However, the wakeup latency on Broxton is closer to 100us. To be
	 * conservative, we have to factor in a context switch on top (due
	 * to ksoftirqd).
	 */
	set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
	set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);

	/* 3a: Enable RC6 */
	set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	/* WaRsUseTimeoutMode:cnl (pre-prod) */
	if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
		rc6_mode = GEN7_RC_CTL_TO_MODE;
	else
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	rc6->ctl_enable =
		GEN6_RC_CTL_HW_ENABLE |
		GEN6_RC_CTL_RC6_ENABLE |
		rc6_mode;

	/*
	 * WaRsDisableCoarsePowerGating:skl,cnl
	 * - Render/Media PG need to be disabled with RC6.
	 */
	if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
		set(uncore, GEN9_PG_ENABLE,
		    GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
}
static void gen8_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 2b: Program RC6 thresholds. */
	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
	set(uncore, GEN6_RC_SLEEP, 0);
	set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */

	/* 3: Enable RC6 */
	rc6->ctl_enable =
		GEN6_RC_CTL_HW_ENABLE |
		GEN7_RC_CTL_TO_MODE |
		GEN6_RC_CTL_RC6_ENABLE;
}
static void gen6_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask;
	int ret;

	set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

	set(uncore, GEN6_RC_SLEEP, 0);
	set(uncore, GEN6_RC1e_THRESHOLD, 1000);
	set(uncore, GEN6_RC6_THRESHOLD, 50000);
	set(uncore, GEN6_RC6p_THRESHOLD, 150000);
	set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* We don't use those on Haswell */
	rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	if (HAS_RC6p(i915))
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
	if (HAS_RC6pp(i915))
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	rc6->ctl_enable =
		rc6_mask |
		GEN6_RC_CTL_EI_MODE(1) |
		GEN6_RC_CTL_HW_ENABLE;

	rc6vids = 0;
	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
				     &rc6vids, NULL);
	if (IS_GEN(i915, 6) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN(i915, 6) &&
		   (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}
}
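/*
 * Illustrative sketch, not part of the driver: the rc6vids word read from
 * pcode packs one encoded voltage per byte; the BIOS check above inspects
 * only the low byte (the RC6 VID) and rewrites it if it decodes to less
 * than 450mV.
 */
#if 0
static bool example_rc6_vid_too_low(u32 rc6vids)
{
	return GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450;
}
#endif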
/* Check that the pcbr address is not empty. */
static int chv_rc6_init(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	resource_size_t pctx_paddr, paddr;
	resource_size_t pctx_size = 32 * SZ_1K;
	u32 pcbr;

	pcbr = intel_uncore_read(uncore, VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
		GEM_BUG_ON(paddr > U32_MAX);

		pctx_paddr = (paddr & ~4095);
		intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
	}

	return 0;
}
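/*
 * Illustrative sketch, not part of the driver: the PCBR fixup above places
 * the power context at the top of stolen memory (dsm), rounded down to a
 * 4KiB boundary so the low address bits are clear.
 */
#if 0
static u32 example_pcbr_addr(u64 dsm_end, u32 pctx_size)
{
	return (u32)((dsm_end + 1 - pctx_size) & ~4095ull);
}
#endif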
static int vlv_rc6_init(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct drm_i915_gem_object *pctx;
	resource_size_t pctx_paddr;
	resource_size_t pctx_size = 24 * SZ_1K;
	u32 pcbr;

	pcbr = intel_uncore_read(uncore, VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		resource_size_t pcbr_offset;

		pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
		pctx = i915_gem_object_create_stolen_for_preallocated(i915,
								      pcbr_offset,
								      pctx_size);
		if (IS_ERR(pctx))
			return PTR_ERR(pctx);

		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(i915, pctx_size);
	if (IS_ERR(pctx)) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		return PTR_ERR(pctx);
	}

	GEM_BUG_ON(range_overflows_t(u64,
				     i915->dsm.start,
				     pctx->stolen->start,
				     U32_MAX));
	pctx_paddr = i915->dsm.start + pctx->stolen->start;
	intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);

out:
	rc6->pctx = pctx;
	return 0;
}
static void chv_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 2a: Program RC6 thresholds. */
	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
	set(uncore, GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us (0x186 * 1.28 us) */
	set(uncore, GEN6_RC6_THRESHOLD, 0x186);

	/* Allows RC6 residency counter to work */
	set(uncore, VLV_COUNTER_CONTROL,
	    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
			       VLV_MEDIA_RC6_COUNT_EN |
			       VLV_RENDER_RC6_COUNT_EN));

	/* 3: Enable RC6 */
	rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
}
static void vlv_rc6_enable(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
	set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, rc6_to_gt(rc6), id)
		set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);

	set(uncore, GEN6_RC6_THRESHOLD, 0x557);

	/* Allows RC6 residency counter to work */
	set(uncore, VLV_COUNTER_CONTROL,
	    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
			       VLV_MEDIA_RC0_COUNT_EN |
			       VLV_RENDER_RC0_COUNT_EN |
			       VLV_MEDIA_RC6_COUNT_EN |
			       VLV_RENDER_RC6_COUNT_EN));

	rc6->ctl_enable =
		GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
}
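/*
 * Illustrative sketch, not part of the driver: VLV_COUNTER_CONTROL is a
 * masked register - the high 16 bits of a write select which of the low 16
 * bits are actually updated - which is why the enables above go through
 * _MASKED_BIT_ENABLE() instead of a read-modify-write cycle.
 */
#if 0
static u32 example_masked_bit_enable(u16 bits)
{
	return (u32)bits << 16 | bits; /* mask in high half, value in low */
}
#endif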
static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	u32 rc6_ctx_base, rc_ctl, rc_sw_target;
	bool enable_rc6 = true;

	rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
	rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
	rc_sw_target &= RC_SW_TARGET_STATE_MASK;
	rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
			 rc_sw_target);

	if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base =
		intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
	      rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 &&
	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
	    !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
	    !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
		enable_rc6 = false;
	}

	if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
		enable_rc6 = false;
	}

	if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}
static bool rc6_supported(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);

	if (!HAS_RC6(i915))
		return false;

	if (intel_vgpu_active(i915))
		return false;

	if (is_mock_gt(rc6_to_gt(rc6)))
		return false;

	if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
		dev_notice(i915->drm.dev,
			   "RC6 and powersaving disabled by BIOS\n");
		return false;
	}

	return true;
}
static void rpm_get(struct intel_rc6 *rc6)
{
	GEM_BUG_ON(rc6->wakeref);
	pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
	rc6->wakeref = true;
}

static void rpm_put(struct intel_rc6 *rc6)
{
	GEM_BUG_ON(!rc6->wakeref);
	pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
	rc6->wakeref = false;
}
static bool pctx_corrupted(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);

	if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
		return false;

	if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
		return false;

	dev_notice(i915->drm.dev,
		   "RC6 context corruption, disabling runtime power management\n");
	return true;
}
static void __intel_rc6_disable(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (INTEL_GEN(i915) >= 9)
		set(uncore, GEN9_PG_ENABLE, 0);
	set(uncore, GEN6_RC_CONTROL, 0);
	set(uncore, GEN6_RC_STATE, 0);
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
void intel_rc6_init(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	int err;

	/* Disable runtime-pm until we can save the GPU state with rc6 pctx */
	rpm_get(rc6);

	if (!rc6_supported(rc6))
		return;

	if (IS_CHERRYVIEW(i915))
		err = chv_rc6_init(rc6);
	else if (IS_VALLEYVIEW(i915))
		err = vlv_rc6_init(rc6);
	else
		err = 0;

	/* Sanitize rc6, ensure it is disabled before we are ready. */
	__intel_rc6_disable(rc6);

	rc6->supported = err == 0;
}
void intel_rc6_sanitize(struct intel_rc6 *rc6)
{
	memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));

	if (rc6->enabled) { /* unbalanced suspend/resume */
		rpm_get(rc6);
		rc6->enabled = false;
	}

	if (rc6->supported)
		__intel_rc6_disable(rc6);
}
void intel_rc6_enable(struct intel_rc6 *rc6)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);

	if (!rc6->supported)
		return;

	GEM_BUG_ON(rc6->enabled);

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (IS_CHERRYVIEW(i915))
		chv_rc6_enable(rc6);
	else if (IS_VALLEYVIEW(i915))
		vlv_rc6_enable(rc6);
	else if (INTEL_GEN(i915) >= 11)
		gen11_rc6_enable(rc6);
	else if (INTEL_GEN(i915) >= 9)
		gen9_rc6_enable(rc6);
	else if (IS_BROADWELL(i915))
		gen8_rc6_enable(rc6);
	else if (INTEL_GEN(i915) >= 6)
		gen6_rc6_enable(rc6);

	rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
		rc6->ctl_enable = 0;

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	if (unlikely(pctx_corrupted(rc6)))
		return;

	/* rc6 is ready, runtime-pm is go! */
	rpm_put(rc6);
	rc6->enabled = true;
}
void intel_rc6_unpark(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);

	if (!rc6->enabled)
		return;

	/* Restore HW timers for automatic RC6 entry while busy */
	set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
}
void intel_rc6_park(struct intel_rc6 *rc6)
{
	struct intel_uncore *uncore = rc6_to_uncore(rc6);

	if (!rc6->enabled)
		return;

	if (unlikely(pctx_corrupted(rc6))) {
		intel_rc6_disable(rc6);
		return;
	}

	if (!rc6->manual)
		return;

	/* Turn off the HW timers and go directly to rc6 */
	set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
	set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
}
void intel_rc6_disable(struct intel_rc6 *rc6)
{
	if (!rc6->enabled)
		return;

	rpm_get(rc6);
	rc6->enabled = false;

	__intel_rc6_disable(rc6);
}
void intel_rc6_fini(struct intel_rc6 *rc6)
{
	struct drm_i915_gem_object *pctx;

	intel_rc6_disable(rc6);

	pctx = fetch_and_zero(&rc6->pctx);
	if (pctx)
		i915_gem_object_put(pctx);

	if (rc6->wakeref)
		rpm_put(rc6);
}
static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/*
	 * The registers accessed do not need forcewake. We borrow
	 * uncore lock to prevent concurrent access to range reg.
	 */
	lockdep_assert_held(&uncore->lock);

	/*
	 * vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	set(uncore, VLV_COUNTER_CONTROL,
	    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = intel_uncore_read_fw(uncore, reg);
	do {
		tmp = upper;

		set(uncore, VLV_COUNTER_CONTROL,
		    _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = intel_uncore_read_fw(uncore, reg);

		set(uncore, VLV_COUNTER_CONTROL,
		    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = intel_uncore_read_fw(uncore, reg);
	} while (upper != tmp && --loop);

	/*
	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * now.
	 */

	return lower | (u64)upper << 8;
}
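/*
 * Illustrative sketch, not part of the driver: the two 32-bit windows into
 * the 40-bit counter overlap - the high window exposes bits [39:8] and the
 * low window bits [31:0] - hence the 8-bit shift when reassembling the
 * value above.
 */
#if 0
static u64 example_reassemble_40bit(u32 lower, u32 upper)
{
	return lower | (u64)upper << 8; /* bits [31:0] | bits [39:8] << 8 */
}
#endif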
u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
{
	struct drm_i915_private *i915 = rc6_to_i915(rc6);
	struct intel_uncore *uncore = rc6_to_uncore(rc6);
	u64 time_hw, prev_hw, overflow_hw;
	unsigned int fw_domains;
	unsigned long flags;
	unsigned int i;
	u32 mul, div;

	if (!rc6->supported)
		return 0;

	/*
	 * Store previous hw counter values for counter wrap-around handling.
	 *
	 * There are only four interesting registers and they live next to each
	 * other so we can use the relative address, compared to the smallest
	 * one as the index into driver storage.
	 */
	i = (i915_mmio_reg_offset(reg) -
	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
	if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency)))
		return 0;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		mul = 1000000;
		div = i915->czclk_freq;
		overflow_hw = BIT_ULL(40);
		time_hw = vlv_residency_raw(uncore, reg);
	} else {
		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
		if (IS_GEN9_LP(i915)) {
			mul = 10000;
			div = 12;
		} else {
			mul = 1280;
			div = 1;
		}

		overflow_hw = BIT_ULL(32);
		time_hw = intel_uncore_read_fw(uncore, reg);
	}

	/*
	 * Counter wrap handling. We rely on a sufficient frequency of
	 * queries; otherwise the counters can still wrap undetected
	 * between two samples.
	 */
	prev_hw = rc6->prev_hw_residency[i];
	rc6->prev_hw_residency[i] = time_hw;

	/* RC6 delta from last sample. */
	if (time_hw >= prev_hw)
		time_hw -= prev_hw;
	else
		time_hw += overflow_hw - prev_hw;

	/* Add delta to RC6 extended raw driver copy. */
	time_hw += rc6->cur_residency[i];
	rc6->cur_residency[i] = time_hw;

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return mul_u64_u32_div(time_hw, mul, div);
}
u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
}
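/*
 * Illustrative usage, not part of the driver: sampling the normal RC6
 * residency counter (GEN6_GT_GFX_RC6), as e.g. debugfs or the PMU would.
 */
#if 0
static u64 example_read_rc6_us(struct intel_rc6 *rc6)
{
	return intel_rc6_residency_us(rc6, GEN6_GT_GFX_RC6);
}
#endif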
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rc6.c"
#endif