/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;

	return rps_pm_sanitize_mask(rps, ~mask);
}

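/*
 * Illustrative note (not from the original source): GEN6_PMINTRMSK is an
 * inverted mask register, i.e. a set bit *disables* that interrupt, which is
 * why rps_pm_mask() returns ~mask. For a request strictly between the soft
 * limits both up and down events stay unmasked; at min_freq_softlimit only
 * the "up" events remain, and at max_freq_softlimit only the "down" events,
 * so we never take an interrupt asking us to cross a limit we cannot cross.
 */
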
static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(&gt->irq_lock);
	if (INTEL_GEN(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

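/*
 * Illustrative note (not from the original source): each cparams entry is a
 * (slope m, intercept c) pair selected by FSB bucket 'i' and memory clock
 * 't'; __ips_chipset_val() below evaluates the linear energy model
 *
 *	chipset_power = ((m * delta_counts / dt_ms) + c) / 10
 *
 * so, for example, with the { 1, 1333, 301, 28664 } row a counter delta of
 * 1000 over 100ms gives (301 * 1000 / 100 + 28664) / 10 = 3167.
 */
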
static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}

static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}

static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}

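/*
 * Illustrative note (not from the original source): TR1 is the raw thermal
 * reading and TSFS supplies a fused slope/intercept, so the conversion above
 * is a straight line. With hypothetical fuse values m = 64 and b = 20, a
 * reading of x = 127 yields 64 * 127 / 127 - 20 = 44.
 */
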
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

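/*
 * Illustrative note (not from the original source): the dead zone [8, 31)
 * is snapped up to 31, so e.g. pxvid 0x10 and pxvid 0x1e both decode to
 * (31 + 2) * 125 = 4125, while pxvid 0x2f decodes to (47 + 2) * 125 = 6125.
 */
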
static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}

static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 count, delta;
	s64 now, dt;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}

static bool gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return true;
}

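/*
 * Illustrative note (not from the original source): because gen5_rps_init()
 * stores min_freq = fmax and max_freq = fmin, the two inversion lines above
 * compute delay = fmax + fmin - val. E.g. with fmax = 1 and fmin = 11,
 * requesting val = rps->max_freq = 11 writes delay bin 1 (the fastest),
 * while val = rps->min_freq = 1 writes delay bin 11 (the slowest).
 */
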
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}

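/*
 * Illustrative note (not from the original source), using hypothetical
 * field values: with div = 12, pre = 1 and post = 1 the PXVFREQ entry
 * decodes to 12 * 133333 / (1 << 1) = 799998, i.e. the register encodes
 * a 133.333 MHz reference scaled by div and divided by pre << post.
 */
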
static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}

static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore, MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	intel_uncore_write(uncore, DEIER,
			   intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
	intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
	intel_uncore_write(uncore, DEIMR,
			   intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}

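/*
 * Illustrative note (not from the original source): GEN6_RP_INTERRUPT_LIMITS
 * packs both bounds into one register, and gen9 moved the fields (shifts
 * 23/14 versus 24/16 on earlier parts). E.g. on gen9 the register normally
 * holds only max_freq_softlimit << 23, and the down limit is ORed in at
 * << 14 only once the request has already reached min_freq_softlimit.
 */
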
static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(i915))
		goto skip_hw_write;

	GT_TRACE(rps_to_gt(rps),
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power, threshold_up, ei_up, threshold_down, ei_down);

	set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));

	set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));

	set(uncore, GEN6_RP_CONTROL,
	    (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}

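/*
 * Illustrative note (not from the original source): the evaluation-interval
 * registers tick in 1280ns units, which is what GT_INTERVAL_FROM_US() hides;
 * e.g. ei_up = 16000us becomes 16000 * 1000 / 1280 = 12500 register ticks,
 * and the matching threshold write is simply that interval scaled by
 * threshold_up / 100.
 */
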
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && READ_ONCE(rps->active))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	if (INTEL_GEN(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

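/*
 * Illustrative note (not from the original source): the three encodings
 * differ only in where the frequency bin lands in GEN6_RPNSWREQ -- roughly
 * val << 23 for gen9+ (GEN9_FREQUENCY), val << 24 for hsw/bdw
 * (HSW_FREQUENCY) and val << 25 plus turbo bits for gen6/7 -- so the same
 * u8 bin is written at a platform-specific offset.
 */
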
static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (INTEL_GEN(i915) < 6)
		return 0;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else
		err = gen6_rps_set(rps, val);
	if (err)
		return err;

	if (update)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}

void intel_rps_unpark(struct intel_rps *rps)
{
	if (!rps->enabled)
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	WRITE_ONCE(rps->active, true);

	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	rps->last_adj = 0;

	mutex_unlock(&rps->lock);

	if (INTEL_GEN(rps_to_i915(rps)) >= 6)
		rps_enable_interrupts(rps);

	if (IS_GEN(rps_to_i915(rps), 5))
		gen5_rps_update(rps);
}

void intel_rps_park(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (!rps->enabled)
		return;

	if (INTEL_GEN(i915) >= 6)
		rps_disable_interrupts(rps);

	WRITE_ONCE(rps->active, false);
	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver are now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	rps->cur_freq =
		max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

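/*
 * Illustrative note (not from the original source): the round_down() above
 * implements the "treat parking as a downclock" idea; e.g. cur_freq = 11
 * resumes at max(round_down(10, 2), min_freq) = 10, and cur_freq = 12 also
 * resumes at 10, always an even bin for Cherryview's benefit.
 */
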
void intel_rps_boost(struct i915_request *rq)
{
	struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
	unsigned long flags;

	if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
		return;

	/* Serializes with i915_request_retire() */
	spin_lock_irqsave(&rq->lock, flags);
	if (!i915_request_has_waitboost(rq) &&
	    !dma_fence_is_signaled_locked(&rq->fence)) {
		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (!atomic_fetch_inc(&rps->num_waiters) &&
		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		atomic_inc(&rps->boosts);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (rps->active) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;

	return 0;
}

static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(i915,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					(ddcc_status >> 8) & 0xff,
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}

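/*
 * Illustrative note (not from the original source): after the
 * GEN9_FREQ_SCALER (== 3) multiplication the values count 16.66MHz steps,
 * e.g. an RP0 fuse of 22 becomes 66 units, which intel_gpu_freq() turns
 * back into 66 * 50 / 3 = 1100 MHz.
 */
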
static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Program defaults and thresholds for RPS */
	if (IS_GEN(i915, 9))
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}

static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (RUNTIME_INFO(i915)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = corr * 150142 * state1 / 10000 - 78642;
	corr /= 100000;
	corr2 = corr * ips->corr;

	state2 = corr2 * state1 / 10000;
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}

void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (IS_CHERRYVIEW(i915))
		rps->enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		rps->enabled = vlv_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 9)
		rps->enabled = gen9_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 8)
		rps->enabled = gen8_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 6)
		rps->enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		rps->enabled = gen5_rps_enable(rps);
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	if (!rps->enabled)
		return;

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq));

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->enabled = false;

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}

static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}

static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}

static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

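/*
 * Illustrative note (not from the original source): the comparison avoids a
 * division -- instead of checking c0 / time against threshold / 100, both
 * sides are cross-multiplied, with the unit-conversion constants folded
 * into c0 up front; only the relative magnitudes matter.
 */
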
static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, yesno(client_boost),
		 adj, new_freq, min, max);

	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/*
	 * Limit deboosting and boosting to keep ourselves at the extremes
	 * when in the respective power modes (i.e. slowly decrease frequencies
	 * while in the HIGH_POWER zone and slowly increase frequencies while
	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
	 * to the next level quickly, and conversely if busy we expect to
	 * hit a waitboost and rapidly switch into max power.
	 */
	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
	    (adj > 0 && rps->power.mode == LOW_POWER))
		rps->last_adj = 0;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&rps->lock);

out:
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);
}

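/*
 * Illustrative note (not from the original source): the adjustment is a
 * simple doubling back-off in reverse -- consecutive UP_THRESHOLD events
 * grow last_adj 1, 2, 4, ... (2, 4, 8, ... on Cherryview, which needs even
 * bins), and any boost, timeout, unknown event or limit hit resets it to 0.
 */
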
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(&gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(&gt->irq_lock);
	}

	if (INTEL_GEN(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}

void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}

void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);

	atomic_set(&rps->num_waiters, 0);
}

void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (INTEL_GEN(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
				       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang, and VLV, CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = (rpstat >> 8) & 0xff;
	else if (INTEL_GEN(i915) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

	return cagf;
}

static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 freq;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else {
		freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
	}

	return intel_rps_get_cagf(rps, freq);
}

u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}

/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (IS_GEN(gt->i915, 5)) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}

static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	/* Guard against no device being registered before taking a ref */
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &i915->gt.rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#endif