/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);
static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}
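/*
 * rps_pm_mask() computes the value written to GEN6_PMINTRMSK, where a set
 * bit *masks* an interrupt: the events we still want are collected in a
 * local mask and then inverted. For example, a request at the softlimit
 * minimum leaves all down events masked (there is nowhere lower to go),
 * while for min < val < max both up and down events stay unmasked,
 * subject to the currently enabled pm_events and the must-be-zero bits.
 */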
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= READ_ONCE(rps->pm_events);

	return rps_pm_sanitize_mask(rps, ~mask);
}
static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	rps_reset_ei(rps);

	if (IS_VALLEYVIEW(gt->i915))
		/* WaGsvRC0ResidencyMethod:vlv */
		events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);
	WRITE_ONCE(rps->pm_events, events);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}
static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(&gt->irq_lock);
	if (INTEL_GEN(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}
static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	WRITE_ONCE(rps->pm_events, 0);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
}
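/*
 * Coefficients for the ilk chipset power estimate used by
 * __ips_chipset_val() below: each row supplies the (m, c) pair of a
 * linear model, selected by the FSB-derived index (i) and the memory
 * frequency (t, in MHz) in gen5_rps_init().
 */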
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	rps->min_freq = fmax;
	rps->max_freq = fmin;

	rps->idle_freq = rps->min_freq;
	rps->cur_freq = rps->idle_freq;
}
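/*
 * The chipset power estimate below boils down to
 *
 *	chipset_power = (m * delta / dt + c) / 10
 *
 * where delta is the growth of the DMIEC/DDREC/CSIEC energy counters
 * over the dt milliseconds since the last sample, and (m, c) are the
 * coefficients looked up from cparams[] in gen5_rps_init().
 */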
static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}
static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}
static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}
static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}
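/*
 * On ilk the frequency bins are inverted: a smaller MEMSWCTL value means
 * a higher frequency, and gen5_rps_init() stored min_freq/max_freq
 * already swapped (min_freq = fmax). gen5_rps_set() therefore converts
 * the requested bin back into an ips delay as
 * delay = min_freq + (max_freq - val), so val == max_freq yields the
 * shortest delay, i.e. the fastest clock.
 */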
static bool gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return true;
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}
static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}
static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}
static void gen5_rps_disable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore, MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	intel_uncore_write(uncore, DEIER,
			   intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
	intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
	intel_uncore_write(uncore, DEIMR,
			   intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
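/*
 * GEN6_RP_INTERRUPT_LIMITS packs both softlimits into one register; the
 * field offsets moved on gen9: the maximum sits at bit 23 and the
 * minimum at bit 14 (gen9+) versus bit 24 and bit 16 on earlier gens,
 * as encoded below.
 */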
static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}
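/*
 * Worked example for the LOW_POWER tuning below: an up evaluation
 * interval of 16000us with a 95% threshold programs
 * GEN6_RP_UP_THRESHOLD with GT_INTERVAL_FROM_US(i915, 16000 * 95 / 100),
 * i.e. 15200us of busyness within the 16ms window triggers an up
 * interrupt, both values converted into the hardware's ~1280ns ticks.
 */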
static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(i915))
		goto skip_hw_write;

	set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));

	set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));

	set(uncore, GEN6_RP_CONTROL,
	    (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}
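/*
 * Pick the power zone from the requested frequency, with hysteresis:
 * LOW_POWER around and below RPe, HIGH_POWER from RP0 (or midway
 * between RP1 and RP0 on the way back down), BETWEEN otherwise.
 * Transitions also require the request to move in the matching
 * direction relative to cur_freq, so a zone is only left when we are
 * actually heading out of it.
 */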
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && READ_ONCE(rps->active))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}
static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	if (INTEL_GEN(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (INTEL_GEN(i915) < 6)
		return 0;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else
		err = gen6_rps_set(rps, val);
	if (err)
		return err;

	if (update)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}
void intel_rps_unpark(struct intel_rps *rps)
{
	u8 freq;

	if (!rps->enabled)
		return;

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	WRITE_ONCE(rps->active, true);

	freq = max(rps->cur_freq, rps->efficient_freq);
	freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
	intel_rps_set(rps, freq);

	rps->last_adj = 0;

	mutex_unlock(&rps->lock);

	if (INTEL_GEN(rps_to_i915(rps)) >= 6)
		rps_enable_interrupts(rps);

	if (IS_GEN(rps_to_i915(rps), 5))
		gen5_rps_update(rps);
}
void intel_rps_park(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (!rps->enabled)
		return;

	if (INTEL_GEN(i915) >= 6)
		rps_disable_interrupts(rps);

	WRITE_ONCE(rps->active, false);
	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver are now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	rps->cur_freq =
		max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
}
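/*
 * Waitboost: the first client found waiting on an unsignaled request
 * bumps num_waiters and kicks the worker, which then jumps straight to
 * boost_freq (see rps_work()). The I915_FENCE_FLAG_BOOST bit ensures a
 * request contributes to the boost statistics only once.
 */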
void intel_rps_boost(struct i915_request *rq)
{
	struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
	unsigned long flags;

	if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
		return;

	/* Serializes with i915_request_retire() */
	spin_lock_irqsave(&rq->lock, flags);
	if (!i915_request_has_waitboost(rq) &&
	    !dma_fence_is_signaled_locked(&rq->fence)) {
		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);

		if (!atomic_fetch_inc(&rps->num_waiters) &&
		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		atomic_inc(&rps->boosts);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}
int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (rps->active) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;

	return 0;
}
static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(i915,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					(ddcc_status >> 8) & 0xff,
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}

static bool rps_reset(struct intel_rps *rps)
{
	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		DRM_ERROR("Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Program defaults and thresholds for RPS */
	if (IS_GEN(i915, 9))
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	/* 1 second timeout */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
			      GT_INTERVAL_FROM_US(i915, 1000000));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
			      100000000 / 128); /* 1 second timeout */

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	return rps_reset(rps);
}
static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (RUNTIME_INFO(i915)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}
static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS*/
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}
static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}
static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}
static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = corr * 150142 * state1 / 10000 - 78642;
	corr /= 100000;
	corr2 = corr * ips->corr;

	state2 = corr2 * state1 / 10000;
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}
void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (IS_CHERRYVIEW(i915))
		rps->enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		rps->enabled = vlv_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 9)
		rps->enabled = gen9_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 8)
		rps->enabled = gen8_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 6)
		rps->enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		rps->enabled = gen5_rps_enable(rps);
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	if (!rps->enabled)
		return;

	drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
	drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);

	drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
	drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
}
static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->enabled = false;

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}
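/*
 * On vlv/chv the opcodes exchanged with the punit encode a GPLL
 * multiplier rather than a 50MHz bin. The helpers below convert both
 * ways using gpll_ref_freq (in kHz); e.g. for byt,
 * freq[MHz] = gpll_ref_freq * (opcode - 0xb7) / 1000.
 */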
static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
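/*
 * intel_gpu_freq()/intel_freq_opcode() translate between the hardware
 * encoding and MHz. Pre-gen9 uses 50MHz units (GT_FREQUENCY_MULTIPLIER);
 * gen9+ uses 50/3 ~= 16.67MHz units, so e.g. an encoding of 6 on gen9
 * reads back as DIV_ROUND_CLOSEST(6 * 50, 3) = 100MHz.
 */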
int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
}
static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->max_freq),
			 rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->efficient_freq),
			 rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->rp1_freq),
			 rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->min_freq),
			 rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}
static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->max_freq),
			 rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->efficient_freq),
			 rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->rp1_freq),
			 rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(rps, rps->min_freq),
			 rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
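/*
 * The worker implements a simple frequency ladder: each consecutive up
 * (or down) threshold event doubles last_adj, so the request moves by
 * 1, 2, 4, ... bins (2, 4, 8, ... on chv, which only accepts even
 * encodings) until a boost, timeout or softlimit resets the ramp.
 */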
static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/*
	 * Limit deboosting and boosting to keep ourselves at the extremes
	 * when in the respective power modes (i.e. slowly decrease frequencies
	 * while in the HIGH_POWER zone and slowly increase frequencies while
	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
	 * to the next level quickly, and conversely if busy we expect to
	 * hit a waitboost and rapidly switch into max power.
	 */
	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
	    (adj > 0 && rps->power.mode == LOW_POWER))
		rps->last_adj = 0;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&rps->lock);

out:
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);
}
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & READ_ONCE(rps->pm_events);
	if (events) {
		spin_lock(&gt->irq_lock);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(&gt->irq_lock);
	}

	if (INTEL_GEN(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}
void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);

	atomic_set(&rps->num_waiters, 0);
}
void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (INTEL_GEN(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
				       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (rps->max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;
	rps->cur_freq = rps->idle_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = (rpstat >> 8) & 0xff;
	else if (INTEL_GEN(i915) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

	return cagf;
}
static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 freq;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else {
		freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
	}

	return intel_rps_get_cagf(rps, freq);
}
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}
/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (IS_GEN(gt->i915, 5)) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}
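/*
 * mchdev_get() resolves the RCU-protected ips_mchdev backpointer and
 * takes a reference on the drm device, so the IPS entrypoints below can
 * safely run concurrently with i915 unload; each caller balances it
 * with drm_dev_put().
 */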
static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &i915->gt.rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = gen5_rps_set(rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);