/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, running as low as 0V while in this stage.
 * This stage is entered automatically when the GPU is idle and RC6 support
 * is enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
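
/*
 * Example: a platform that supports plain RC6 and deep RC6 but not RC6pp
 * would advertise (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), leaving bit 2
 * clear so the deepest state is never entered.
 */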

static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
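
/*
 * Each row below follows the struct cxsr_latency field order: is_desktop,
 * is_ddr3, FSB frequency (MHz), memory frequency (MHz), then the four
 * latencies (in ns) consumed by pineview_update_wm(): display_sr, cursor_sr,
 * display_hpll_disable and cursor_hpll_disable.
 */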
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
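
/*
 * Example expansion: FW_WM(2, SR) evaluates to
 * ((2 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the watermark value shifted
 * into the SR field of a DSPFW register and clamped to that field's mask.
 */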

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and yet it does
		 * have the related watermark in FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
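
/*
 * Example: VLV_FIFO_START(dsparb, dsparb2, 16, 8) assembles a 9-bit FIFO
 * start point from bits 23:16 of DSPARB (the low 8 bits) and bit 8 of
 * DSPARB2 (the 9th bit), as used for pipe B sprite0 below.
 */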

static int vlv_get_fifo_size(struct drm_device *dev,
			      enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
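
/*
 * Worked example with illustrative numbers: a 100000 kHz pixel clock at
 * 4 bytes per pixel and 5000 ns of latency drains
 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes while memory wakes up;
 * with a 64 byte cacheline that is DIV_ROUND_UP(2000, 64) = 32 entries,
 * so the watermark is programmed to fifo_size - (32 + guard_size).
 */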

static struct intel_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = drm_format_plane_cpp(fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = drm_format_plane_cpp(fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev_priv,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
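
/*
 * Worked example with illustrative numbers: pixel_rate = 148500 (kHz),
 * pipe_htotal = 2200, horiz_pixels = 1920, cpp = 4 and latency = 30
 * (3 us in 0.1 us units) give (30 * 148500) / (2200 * 10000) = 0 whole
 * lines of latency, so (0 + 1) * 1920 * 4 = 7680 bytes must be buffered,
 * which is DIV_ROUND_UP(7680, 64) = 120 FIFO lines.
 */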

static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->base.visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->base.visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
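
/*
 * Example split with illustrative numbers: two visible planes, both at
 * 4 bytes per pixel, share the 511-entry FIFO in proportion to their byte
 * rate, so each gets 511 * 4 / 8 = 255 entries; the leftover entry is
 * handed out by the "spread the remainder evenly" pass above, leaving one
 * plane with 256 entries.
 */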

static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			   VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_update_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(crtc);

	vlv_write_wm_values(crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	static const int sr_latency_ns = 12000;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev_priv, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev_priv, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->base.cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
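
/*
 * Worked example with illustrative numbers: a 3840x2160 source panel-fitted
 * down to a 1920x1080 window keeps pipe_w/pipe_h at 3840x2160 (the larger of
 * source and pfit window), so the effective pixel rate becomes
 * pixel_rate * (3840 * 2160) / (1920 * 1080), i.e. 4x the nominal rate.
 */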

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
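
/*
 * Worked example with illustrative numbers: pixel_rate = 100000 (kHz),
 * cpp = 4 and latency = 50 (5 us in 0.1 us units) give, via method 1,
 * DIV_ROUND_UP(100000 * 4 * 50, 64 * 10000) + 2 = 32 + 2 = 34 FIFO lines.
 * Method 2 instead rounds the latency up to whole scanlines before
 * converting to bytes; the callers below take the smaller of the two for
 * LP watermarks.
 */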

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->base.visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
2078 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2080 const struct intel_atomic_state *intel_state =
2081 to_intel_atomic_state(cstate->base.state);
2082 const struct drm_display_mode *adjusted_mode =
2083 &cstate->base.adjusted_mode;
2084 u32 linetime, ips_linetime;
2086 if (!cstate->base.active)
2088 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2090 if (WARN_ON(intel_state->cdclk == 0))
2093 /* The WM are computed with base on how long it takes to fill a single
2094 * row at the given clock rate, multiplied by 8.
2096 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2097 adjusted_mode->crtc_clock);
2098 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2099 intel_state->cdclk);
2101 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2102 PIPE_WM_LINETIME_TIME(linetime);
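/*
 * Illustrative example (not part of the original source): for a 1080p
 * mode with crtc_htotal = 2200 and crtc_clock = 148500 kHz, one row
 * takes 2200 / 148500000 s, about 14.8 us, so
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 in 0.125 us units.
 * ips_linetime is the same computation performed against cdclk.
 */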
2105 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2107 struct drm_i915_private *dev_priv = to_i915(dev);
2109 if (IS_GEN9(dev_priv)) {
2110 uint32_t val;
2111 int ret, i;
2112 int level, max_level = ilk_wm_max_level(dev_priv);
2114 /* read the first set of memory latencies[0:3] */
2115 val = 0; /* data0 to be programmed to 0 for first set */
2116 mutex_lock(&dev_priv->rps.hw_lock);
2117 ret = sandybridge_pcode_read(dev_priv,
2118 GEN9_PCODE_READ_MEM_LATENCY,
2119 &val);
2120 mutex_unlock(&dev_priv->rps.hw_lock);
2122 if (ret) {
2123 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2124 return;
2125 }
2127 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2128 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2129 GEN9_MEM_LATENCY_LEVEL_MASK;
2130 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2131 GEN9_MEM_LATENCY_LEVEL_MASK;
2132 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2133 GEN9_MEM_LATENCY_LEVEL_MASK;
2135 /* read the second set of memory latencies[4:7] */
2136 val = 1; /* data0 to be programmed to 1 for second set */
2137 mutex_lock(&dev_priv->rps.hw_lock);
2138 ret = sandybridge_pcode_read(dev_priv,
2139 GEN9_PCODE_READ_MEM_LATENCY,
2140 &val);
2141 mutex_unlock(&dev_priv->rps.hw_lock);
2142 if (ret) {
2143 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2144 return;
2145 }
2147 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2148 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2149 GEN9_MEM_LATENCY_LEVEL_MASK;
2150 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2151 GEN9_MEM_LATENCY_LEVEL_MASK;
2152 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2153 GEN9_MEM_LATENCY_LEVEL_MASK;
2156 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2157 * need to be disabled. We make sure to sanitize the values out
2158 * of the punit to satisfy this requirement.
2160 for (level = 1; level <= max_level; level++) {
2161 if (wm[level] == 0) {
2162 for (i = level + 1; i <= max_level; i++)
2163 wm[i] = 0;
2164 break;
2165 }
2166 }
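/*
 * Illustrative example (not part of the original source): if the punit
 * reports wm[] = { 4, 6, 0, 9, ... }, level 2 reads back as 0 us, so
 * levels 2 and up are all zeroed and only WM0/WM1 stay usable.
 */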
2169 * WaWmMemoryReadLatency:skl
2171 * punit doesn't take into account the read latency so we need
2172 * to add 2us to the various latency levels we retrieve from the
2173 * punit when level 0 response data is 0us.
2174 */
2175 if (wm[0] == 0) {
2176 wm[0] += 2;
2177 for (level = 1; level <= max_level; level++) {
2178 if (wm[level] == 0)
2179 break;
2180 wm[level] += 2;
2181 }
2182 }
2184 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2185 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2187 wm[0] = (sskpd >> 56) & 0xFF;
2188 if (wm[0] == 0)
2189 wm[0] = sskpd & 0xF;
2190 wm[1] = (sskpd >> 4) & 0xFF;
2191 wm[2] = (sskpd >> 12) & 0xFF;
2192 wm[3] = (sskpd >> 20) & 0x1FF;
2193 wm[4] = (sskpd >> 32) & 0x1FF;
2194 } else if (INTEL_INFO(dev)->gen >= 6) {
2195 uint32_t sskpd = I915_READ(MCH_SSKPD);
2197 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2198 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2199 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2200 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2201 } else if (INTEL_INFO(dev)->gen >= 5) {
2202 uint32_t mltr = I915_READ(MLTR_ILK);
2204 /* ILK primary LP0 latency is 700 ns */
2205 wm[0] = 7;
2206 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2207 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2211 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2212 uint16_t wm[5])
2214 /* ILK sprite LP0 latency is 1300 ns */
2215 if (IS_GEN5(dev_priv))
2216 wm[0] = 13;
2219 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2220 uint16_t wm[5])
2222 /* ILK cursor LP0 latency is 1300 ns */
2223 if (IS_GEN5(dev_priv))
2224 wm[0] = 13;
2226 /* WaDoubleCursorLP3Latency:ivb */
2227 if (IS_IVYBRIDGE(dev_priv))
2228 wm[3] *= 2;
2231 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2233 /* how many WM levels are we expecting */
2234 if (INTEL_GEN(dev_priv) >= 9)
2235 return 7;
2236 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2237 return 4;
2238 else if (INTEL_GEN(dev_priv) >= 6)
2239 return 3;
2240 else
2241 return 2;
2244 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2246 const uint16_t wm[8])
2248 int level, max_level = ilk_wm_max_level(dev_priv);
2250 for (level = 0; level <= max_level; level++) {
2251 unsigned int latency = wm[level];
2253 if (latency == 0) {
2254 DRM_ERROR("%s WM%d latency not provided\n",
2255 name, level);
2256 continue;
2257 }
2259 /*
2260 * - latencies are in us on gen9.
2261 * - before then, WM1+ latency values are in 0.5us units
2262 */
2263 if (IS_GEN9(dev_priv))
2264 latency *= 10;
2265 else if (level > 0)
2266 latency *= 5;
2268 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2269 name, level, wm[level],
2270 latency / 10, latency % 10);
2274 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2275 uint16_t wm[5], uint16_t min)
2277 int level, max_level = ilk_wm_max_level(dev_priv);
2279 if (wm[0] >= min)
2280 return false;
2282 wm[0] = max(wm[0], min);
2283 for (level = 1; level <= max_level; level++)
2284 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2286 return true;
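/*
 * Illustrative example (not part of the original source): with min = 12,
 * WM0 (kept in 0.1 us units) is raised to at least 1.2 us, while WM1+
 * (kept in 0.5 us units) are raised to at least DIV_ROUND_UP(12, 5) = 3,
 * i.e. 1.5 us.
 */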
2289 static void snb_wm_latency_quirk(struct drm_device *dev)
2291 struct drm_i915_private *dev_priv = to_i915(dev);
2292 bool changed;
2295 * The BIOS provided WM memory latency values are often
2296 * inadequate for high resolution displays. Adjust them.
2298 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2299 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2300 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2302 if (!changed)
2303 return;
2305 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2306 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2307 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2308 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2311 static void ilk_setup_wm_latency(struct drm_device *dev)
2313 struct drm_i915_private *dev_priv = to_i915(dev);
2315 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2317 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2318 sizeof(dev_priv->wm.pri_latency));
2319 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2320 sizeof(dev_priv->wm.pri_latency));
2322 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
2323 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
2325 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2326 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2327 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2329 if (IS_GEN6(dev_priv))
2330 snb_wm_latency_quirk(dev);
2333 static void skl_setup_wm_latency(struct drm_device *dev)
2335 struct drm_i915_private *dev_priv = to_i915(dev);
2337 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2338 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
2341 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2342 struct intel_pipe_wm *pipe_wm)
2344 /* LP0 watermark maximums depend on this pipe alone */
2345 const struct intel_wm_config config = {
2346 .num_pipes_active = 1,
2347 .sprites_enabled = pipe_wm->sprites_enabled,
2348 .sprites_scaled = pipe_wm->sprites_scaled,
2350 struct ilk_wm_maximums max;
2352 /* LP0 watermarks always use 1/2 DDB partitioning */
2353 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2355 /* At least LP0 must be valid */
2356 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
2357 DRM_DEBUG_KMS("LP0 watermark invalid\n");
2358 return false;
2359 }
2361 return true;
2364 /* Compute new watermarks for the pipe */
2365 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2367 struct drm_atomic_state *state = cstate->base.state;
2368 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2369 struct intel_pipe_wm *pipe_wm;
2370 struct drm_device *dev = state->dev;
2371 const struct drm_i915_private *dev_priv = to_i915(dev);
2372 struct intel_plane *intel_plane;
2373 struct intel_plane_state *pristate = NULL;
2374 struct intel_plane_state *sprstate = NULL;
2375 struct intel_plane_state *curstate = NULL;
2376 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
2377 struct ilk_wm_maximums max;
2379 pipe_wm = &cstate->wm.ilk.optimal;
2381 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2382 struct intel_plane_state *ps;
2384 ps = intel_atomic_get_existing_plane_state(state,
2385 intel_plane);
2386 if (!ps)
2387 continue;
2389 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2390 pristate = ps;
2391 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2392 sprstate = ps;
2393 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2394 curstate = ps;
2395 }
2397 pipe_wm->pipe_enabled = cstate->base.active;
2398 if (sprstate) {
2399 pipe_wm->sprites_enabled = sprstate->base.visible;
2400 pipe_wm->sprites_scaled = sprstate->base.visible &&
2401 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
2402 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
2403 }
2405 usable_level = max_level;
2407 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2408 if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
2409 usable_level = 1;
2411 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2412 if (pipe_wm->sprites_scaled)
2413 usable_level = 0;
2415 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2416 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2418 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2419 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2421 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2422 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2424 if (!ilk_validate_pipe_wm(dev, pipe_wm))
2425 return -EINVAL;
2427 ilk_compute_wm_reg_maximums(dev, 1, &max);
2429 for (level = 1; level <= max_level; level++) {
2430 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
2432 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2433 pristate, sprstate, curstate, wm);
2436 * Disable any watermark level that exceeds the
2437 * register maximums since such watermarks are
2440 if (level > usable_level)
2441 continue;
2443 if (ilk_validate_wm_level(level, &max, wm))
2444 pipe_wm->wm[level] = *wm;
2445 else
2446 usable_level = level;
2447 }
2449 return 0;
2453 * Build a set of 'intermediate' watermark values that satisfy both the old
2454 * state and the new state. These can be programmed to the hardware
2455 * immediately.
2456 */
2457 static int ilk_compute_intermediate_wm(struct drm_device *dev,
2458 struct intel_crtc *intel_crtc,
2459 struct intel_crtc_state *newstate)
2461 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2462 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2463 int level, max_level = ilk_wm_max_level(to_i915(dev));
2466 * Start with the final, target watermarks, then combine with the
2467 * currently active watermarks to get values that are safe both before
2468 * and after the vblank.
2470 *a = newstate->wm.ilk.optimal;
2471 a->pipe_enabled |= b->pipe_enabled;
2472 a->sprites_enabled |= b->sprites_enabled;
2473 a->sprites_scaled |= b->sprites_scaled;
2475 for (level = 0; level <= max_level; level++) {
2476 struct intel_wm_level *a_wm = &a->wm[level];
2477 const struct intel_wm_level *b_wm = &b->wm[level];
2479 a_wm->enable &= b_wm->enable;
2480 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2481 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2482 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2483 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2487 * We need to make sure that these merged watermark values are
2488 * actually a valid configuration themselves. If they're not,
2489 * there's no safe way to transition from the old state to
2490 * the new state, so we need to fail the atomic transaction.
2492 if (!ilk_validate_pipe_wm(dev, a))
2493 return -EINVAL;
2496 * If our intermediate WM are identical to the final WM, then we can
2497 * omit the post-vblank programming; only update if it's different.
2499 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2500 newstate->wm.need_postvbl_update = false;
2502 return 0;
2506 * Merge the watermarks from all active pipes for a specific level.
2508 static void ilk_merge_wm_level(struct drm_device *dev,
2510 struct intel_wm_level *ret_wm)
2512 const struct intel_crtc *intel_crtc;
2514 ret_wm->enable = true;
2516 for_each_intel_crtc(dev, intel_crtc) {
2517 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
2518 const struct intel_wm_level *wm = &active->wm[level];
2520 if (!active->pipe_enabled)
2521 continue;
2524 * The watermark values may have been used in the past,
2525 * so we must maintain them in the registers for some
2526 * time even if the level is now disabled.
2528 if (!wm->enable)
2529 ret_wm->enable = false;
2531 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2532 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2533 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2534 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2539 * Merge all low power watermarks for all active pipes.
2541 static void ilk_wm_merge(struct drm_device *dev,
2542 const struct intel_wm_config *config,
2543 const struct ilk_wm_maximums *max,
2544 struct intel_pipe_wm *merged)
2546 struct drm_i915_private *dev_priv = to_i915(dev);
2547 int level, max_level = ilk_wm_max_level(dev_priv);
2548 int last_enabled_level = max_level;
2550 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2551 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
2552 config->num_pipes_active > 1)
2553 last_enabled_level = 0;
2555 /* ILK: FBC WM must be disabled always */
2556 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2558 /* merge each WM1+ level */
2559 for (level = 1; level <= max_level; level++) {
2560 struct intel_wm_level *wm = &merged->wm[level];
2562 ilk_merge_wm_level(dev, level, wm);
2564 if (level > last_enabled_level)
2565 wm->enable = false;
2566 else if (!ilk_validate_wm_level(level, max, wm))
2567 /* make sure all following levels get disabled */
2568 last_enabled_level = level - 1;
2571 * The spec says it is preferred to disable
2572 * FBC WMs instead of disabling a WM level.
2574 if (wm->fbc_val > max->fbc) {
2575 if (wm->enable)
2576 merged->fbc_wm_enabled = false;
2577 wm->fbc_val = 0;
2578 }
2579 }
2581 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2583 * FIXME this is racy. FBC might get enabled later.
2584 * What we should check here is whether FBC can be
2585 * enabled sometime later.
2587 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
2588 intel_fbc_is_active(dev_priv)) {
2589 for (level = 2; level <= max_level; level++) {
2590 struct intel_wm_level *wm = &merged->wm[level];
2592 wm->enable = false;
2597 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2599 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2600 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
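/*
 * Illustrative example (not part of the original source): with wm[4]
 * enabled (the five-level HSW/BDW case), wm_lp = 2 yields
 * 2 + (2 >= 2 && 1) = 3, so LP1/LP2/LP3 map to levels 1/3/4;
 * otherwise they map to levels 1/2/3.
 */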
2603 /* The value we need to program into the WM_LPx latency field */
2604 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2606 struct drm_i915_private *dev_priv = to_i915(dev);
2608 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2609 return 2 * level;
2610 else
2611 return dev_priv->wm.pri_latency[level];
2614 static void ilk_compute_wm_results(struct drm_device *dev,
2615 const struct intel_pipe_wm *merged,
2616 enum intel_ddb_partitioning partitioning,
2617 struct ilk_wm_values *results)
2619 struct intel_crtc *intel_crtc;
2620 int level, wm_lp;
2622 results->enable_fbc_wm = merged->fbc_wm_enabled;
2623 results->partitioning = partitioning;
2625 /* LP1+ register values */
2626 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2627 const struct intel_wm_level *r;
2629 level = ilk_wm_lp_to_level(wm_lp, merged);
2631 r = &merged->wm[level];
2634 * Maintain the watermark values even if the level is
2635 * disabled. Doing otherwise could cause underruns.
2637 results->wm_lp[wm_lp - 1] =
2638 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2639 (r->pri_val << WM1_LP_SR_SHIFT) |
2640 r->cur_val;
2642 if (r->enable)
2643 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2645 if (INTEL_INFO(dev)->gen >= 8)
2646 results->wm_lp[wm_lp - 1] |=
2647 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2648 else
2649 results->wm_lp[wm_lp - 1] |=
2650 r->fbc_val << WM1_LP_FBC_SHIFT;
2653 * Always set WM1S_LP_EN when spr_val != 0, even if the
2654 * level is disabled. Doing otherwise could cause underruns.
2656 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2657 WARN_ON(wm_lp != 1);
2658 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2659 } else
2660 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2663 /* LP0 register values */
2664 for_each_intel_crtc(dev, intel_crtc) {
2665 enum pipe pipe = intel_crtc->pipe;
2666 const struct intel_wm_level *r =
2667 &intel_crtc->wm.active.ilk.wm[0];
2669 if (WARN_ON(!r->enable))
2670 continue;
2672 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
2674 results->wm_pipe[pipe] =
2675 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2676 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2677 r->cur_val;
2681 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2682 * case both are at the same level. Prefer r1 in case they're the same. */
2683 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2684 struct intel_pipe_wm *r1,
2685 struct intel_pipe_wm *r2)
2687 int level, max_level = ilk_wm_max_level(to_i915(dev));
2688 int level1 = 0, level2 = 0;
2690 for (level = 1; level <= max_level; level++) {
2691 if (r1->wm[level].enable)
2692 level1 = level;
2693 if (r2->wm[level].enable)
2694 level2 = level;
2695 }
2697 if (level1 == level2) {
2698 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2699 return r2;
2700 else
2701 return r1;
2702 } else if (level1 > level2) {
2703 return r1;
2704 } else {
2705 return r2;
2706 }
2709 /* dirty bits used to track which watermarks need changes */
2710 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2711 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2712 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2713 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2714 #define WM_DIRTY_FBC (1 << 24)
2715 #define WM_DIRTY_DDB (1 << 25)
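/*
 * Bit layout sketch (illustrative, not part of the original source):
 * bits 0-2 cover the per-pipe WM0 registers, bits 8-10 the per-pipe
 * linetime registers, bits 16-18 the LP1-LP3 registers
 * (WM_DIRTY_LP(1) == 1 << 16), and bits 24/25 the FBC and DDB
 * partitioning state. A WM0 change on pipe B therefore sets bit 1 and,
 * since the LP watermarks depend on it, bits 16-18 as well.
 */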
2717 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2718 const struct ilk_wm_values *old,
2719 const struct ilk_wm_values *new)
2721 unsigned int dirty = 0;
2722 enum pipe pipe;
2723 int wm_lp;
2725 for_each_pipe(dev_priv, pipe) {
2726 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2727 dirty |= WM_DIRTY_LINETIME(pipe);
2728 /* Must disable LP1+ watermarks too */
2729 dirty |= WM_DIRTY_LP_ALL;
2732 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2733 dirty |= WM_DIRTY_PIPE(pipe);
2734 /* Must disable LP1+ watermarks too */
2735 dirty |= WM_DIRTY_LP_ALL;
2739 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2740 dirty |= WM_DIRTY_FBC;
2741 /* Must disable LP1+ watermarks too */
2742 dirty |= WM_DIRTY_LP_ALL;
2745 if (old->partitioning != new->partitioning) {
2746 dirty |= WM_DIRTY_DDB;
2747 /* Must disable LP1+ watermarks too */
2748 dirty |= WM_DIRTY_LP_ALL;
2751 /* LP1+ watermarks already deemed dirty, no need to continue */
2752 if (dirty & WM_DIRTY_LP_ALL)
2753 return dirty;
2755 /* Find the lowest numbered LP1+ watermark in need of an update... */
2756 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2757 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2758 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2759 break;
2760 }
2762 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2763 for (; wm_lp <= 3; wm_lp++)
2764 dirty |= WM_DIRTY_LP(wm_lp);
2766 return dirty;
2769 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2770 unsigned int dirty)
2772 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2773 bool changed = false;
2775 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2776 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2777 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2778 changed = true;
2779 }
2780 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2781 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2782 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2783 changed = true;
2784 }
2785 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2786 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2787 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2788 changed = true;
2789 }
2791 /*
2792 * Don't touch WM1S_LP_EN here.
2793 * Doing so could cause underruns.
2794 */
2796 return changed;
2800 * The spec says we shouldn't write when we don't need, because every write
2801 * causes WMs to be re-evaluated, expending some power.
2803 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2804 struct ilk_wm_values *results)
2806 struct drm_device *dev = &dev_priv->drm;
2807 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2811 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2815 _ilk_disable_lp_wm(dev_priv, dirty);
2817 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2818 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2819 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2820 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2821 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2822 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2824 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2825 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2826 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2827 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2828 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2829 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2831 if (dirty & WM_DIRTY_DDB) {
2832 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2833 val = I915_READ(WM_MISC);
2834 if (results->partitioning == INTEL_DDB_PART_1_2)
2835 val &= ~WM_MISC_DATA_PARTITION_5_6;
2837 val |= WM_MISC_DATA_PARTITION_5_6;
2838 I915_WRITE(WM_MISC, val);
2840 val = I915_READ(DISP_ARB_CTL2);
2841 if (results->partitioning == INTEL_DDB_PART_1_2)
2842 val &= ~DISP_DATA_PARTITION_5_6;
2844 val |= DISP_DATA_PARTITION_5_6;
2845 I915_WRITE(DISP_ARB_CTL2, val);
2849 if (dirty & WM_DIRTY_FBC) {
2850 val = I915_READ(DISP_ARB_CTL);
2851 if (results->enable_fbc_wm)
2852 val &= ~DISP_FBC_WM_DIS;
2854 val |= DISP_FBC_WM_DIS;
2855 I915_WRITE(DISP_ARB_CTL, val);
2858 if (dirty & WM_DIRTY_LP(1) &&
2859 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2860 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2862 if (INTEL_INFO(dev)->gen >= 7) {
2863 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2864 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2865 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2866 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2869 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2870 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2871 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2872 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2873 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2874 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2876 dev_priv->wm.hw = *results;
2879 bool ilk_disable_lp_wm(struct drm_device *dev)
2881 struct drm_i915_private *dev_priv = to_i915(dev);
2883 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2886 #define SKL_SAGV_BLOCK_TIME 30 /* µs */
2889 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2890 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2891 * other universal planes are in indices 1..n. Note that this may leave unused
2892 * indices between the top "sprite" plane and the cursor.
2895 skl_wm_plane_id(const struct intel_plane *plane)
2897 switch (plane->base.type) {
2898 case DRM_PLANE_TYPE_PRIMARY:
2899 return 0;
2900 case DRM_PLANE_TYPE_CURSOR:
2901 return PLANE_CURSOR;
2902 case DRM_PLANE_TYPE_OVERLAY:
2903 return plane->plane + 1;
2904 default:
2905 MISSING_CASE(plane->base.type);
2906 return plane->plane;
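/*
 * Illustrative example (not part of the original source): on a pipe
 * with a single sprite, the primary lands in slot 0, the sprite
 * (plane->plane == 0) in slot 1, and the cursor in PLANE_CURSOR,
 * leaving the slot in between unused.
 */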
2911 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
2912 * so assume we'll always need it in order to avoid underruns.
2914 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
2916 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2918 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
2919 IS_KABYLAKE(dev_priv))
2920 return true;
2922 return false;
2926 intel_has_sagv(struct drm_i915_private *dev_priv)
2928 if (IS_KABYLAKE(dev_priv))
2929 return true;
2931 if (IS_SKYLAKE(dev_priv) &&
2932 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
2933 return true;
2935 return false;
2939 * SAGV dynamically adjusts the system agent voltage and clock frequencies
2940 * depending on power and performance requirements. The display engine access
2941 * to system memory is blocked during the adjustment time. Because of the
2942 * blocking time, having this enabled can cause full system hangs and/or pipe
2943 * underruns if we don't meet all of the following requirements:
2945 * - <= 1 pipe enabled
2946 * - All planes can enable watermarks for latencies >= SAGV engine block time
2947 * - We're not using an interlaced display configuration
2950 intel_enable_sagv(struct drm_i915_private *dev_priv)
2954 if (!intel_has_sagv(dev_priv))
2955 return 0;
2957 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
2958 return 0;
2960 DRM_DEBUG_KMS("Enabling the SAGV\n");
2961 mutex_lock(&dev_priv->rps.hw_lock);
2963 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2964 GEN9_SAGV_ENABLE);
2966 /* We don't need to wait for the SAGV when enabling */
2967 mutex_unlock(&dev_priv->rps.hw_lock);
2970 * Some skl systems, pre-release machines in particular,
2971 * don't actually have an SAGV.
2973 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
2974 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2975 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
2976 return 0;
2977 } else if (ret < 0) {
2978 DRM_ERROR("Failed to enable the SAGV\n");
2979 return ret;
2980 }
2982 dev_priv->sagv_status = I915_SAGV_ENABLED;
2983 return 0;
2987 intel_do_sagv_disable(struct drm_i915_private *dev_priv)
2989 int ret;
2990 uint32_t temp = GEN9_SAGV_DISABLE;
2992 ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2993 &temp);
2994 if (ret)
2995 return ret;
2996 else
2997 return temp & GEN9_SAGV_IS_DISABLED;
3001 intel_disable_sagv(struct drm_i915_private *dev_priv)
3005 if (!intel_has_sagv(dev_priv))
3006 return 0;
3008 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3009 return 0;
3011 DRM_DEBUG_KMS("Disabling the SAGV\n");
3012 mutex_lock(&dev_priv->rps.hw_lock);
3014 /* bspec says to keep retrying for at least 1 ms */
3015 ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
3016 mutex_unlock(&dev_priv->rps.hw_lock);
3018 if (ret == -ETIMEDOUT) {
3019 DRM_ERROR("Request to disable SAGV timed out\n");
3020 return -ETIMEDOUT;
3021 }
3024 * Some skl systems, pre-release machines in particular,
3025 * don't actually have an SAGV.
3027 if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
3028 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3029 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3030 return 0;
3031 } else if (result < 0) {
3032 DRM_ERROR("Failed to disable the SAGV\n");
3033 return result;
3034 }
3036 dev_priv->sagv_status = I915_SAGV_DISABLED;
3037 return 0;
3040 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3042 struct drm_device *dev = state->dev;
3043 struct drm_i915_private *dev_priv = to_i915(dev);
3044 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3045 struct intel_crtc *crtc;
3046 struct intel_plane *plane;
3047 struct intel_crtc_state *cstate;
3048 struct skl_plane_wm *wm;
3052 if (!intel_has_sagv(dev_priv))
3053 return false;
3056 * SKL workaround: bspec recommends we disable the SAGV when we have
3057 * more than one pipe enabled
3059 * If there are no active CRTCs, no additional checks need be performed
3061 if (hweight32(intel_state->active_crtcs) == 0)
3062 return true;
3063 else if (hweight32(intel_state->active_crtcs) > 1)
3064 return false;
3066 /* Since we're now guaranteed to only have one active CRTC... */
3067 pipe = ffs(intel_state->active_crtcs) - 1;
3068 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3069 cstate = to_intel_crtc_state(crtc->base.state);
3071 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3072 return false;
3074 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3075 wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
3077 /* Skip this plane if it's not enabled */
3078 if (!wm->wm[0].plane_en)
3079 continue;
3081 /* Find the highest enabled wm level for this plane */
3082 for (level = ilk_wm_max_level(dev_priv);
3083 !wm->wm[level].plane_en; --level)
3086 latency = dev_priv->wm.skl_latency[level];
3088 if (skl_needs_memory_bw_wa(intel_state) &&
3089 plane->base.state->fb->modifier[0] ==
3090 I915_FORMAT_MOD_X_TILED)
3091 latency += 15;
3093 /*
3094 * If any of the planes on this pipe don't enable wm levels
3095 * that incur memory latencies higher than 30µs we can't enable
3096 * the SAGV
3097 */
3098 if (latency < SKL_SAGV_BLOCK_TIME)
3099 return false;
3100 }
3102 return true;
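/*
 * Illustrative example (not part of the original source): an X-tiled
 * plane whose highest enabled level has a 20 us latency gets bumped to
 * 35 us by the memory bandwidth WA, clearing the 30 us
 * SKL_SAGV_BLOCK_TIME threshold; without the WA the same plane would
 * make this function return false.
 */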
3106 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3107 const struct intel_crtc_state *cstate,
3108 struct skl_ddb_entry *alloc, /* out */
3109 int *num_active /* out */)
3111 struct drm_atomic_state *state = cstate->base.state;
3112 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3113 struct drm_i915_private *dev_priv = to_i915(dev);
3114 struct drm_crtc *for_crtc = cstate->base.crtc;
3115 unsigned int pipe_size, ddb_size;
3116 int nth_active_pipe;
3118 if (WARN_ON(!state) || !cstate->base.active) {
3119 alloc->start = 0;
3120 alloc->end = 0;
3121 *num_active = hweight32(dev_priv->active_crtcs);
3122 return;
3123 }
3125 if (intel_state->active_pipe_changes)
3126 *num_active = hweight32(intel_state->active_crtcs);
3127 else
3128 *num_active = hweight32(dev_priv->active_crtcs);
3130 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3131 WARN_ON(ddb_size == 0);
3133 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3136 * If the state doesn't change the active CRTC's, then there's
3137 * no need to recalculate; the existing pipe allocation limits
3138 * should remain unchanged. Note that we're safe from racing
3139 * commits since any racing commit that changes the active CRTC
3140 * list would need to grab _all_ crtc locks, including the one
3141 * we currently hold.
3143 if (!intel_state->active_pipe_changes) {
3144 *alloc = to_intel_crtc(for_crtc)->hw_ddb;
3145 return;
3146 }
3148 nth_active_pipe = hweight32(intel_state->active_crtcs &
3149 (drm_crtc_mask(for_crtc) - 1));
3150 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3151 alloc->start = nth_active_pipe * ddb_size / *num_active;
3152 alloc->end = alloc->start + pipe_size;
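/*
 * Worked example (illustrative, not part of the original source):
 * with ddb_size = 896 blocks (SKL), 4 blocks go to the bypass path,
 * leaving 892. With two active pipes each gets pipe_size = 446 blocks,
 * and the second active pipe (nth_active_pipe = 1) receives the range
 * [446, 892).
 */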
3155 static unsigned int skl_cursor_allocation(int num_active)
3157 if (num_active == 1)
3158 return 32;
3160 return 8;
3163 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3165 entry->start = reg & 0x3ff;
3166 entry->end = (reg >> 16) & 0x3ff;
3167 if (entry->end)
3168 entry->end += 1;
3171 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3172 struct skl_ddb_allocation *ddb /* out */)
3174 enum pipe pipe;
3175 int plane;
3176 u32 val;
3178 memset(ddb, 0, sizeof(*ddb));
3180 for_each_pipe(dev_priv, pipe) {
3181 enum intel_display_power_domain power_domain;
3183 power_domain = POWER_DOMAIN_PIPE(pipe);
3184 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3185 continue;
3187 for_each_universal_plane(dev_priv, pipe, plane) {
3188 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
3189 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
3190 val);
3191 }
3193 val = I915_READ(CUR_BUF_CFG(pipe));
3194 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
3195 val);
3197 intel_display_power_put(dev_priv, power_domain);
3202 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3203 * The bspec defines downscale amount as:
3206 * Horizontal down scale amount = maximum[1, Horizontal source size /
3207 * Horizontal destination size]
3208 * Vertical down scale amount = maximum[1, Vertical source size /
3209 * Vertical destination size]
3210 * Total down scale amount = Horizontal down scale amount *
3211 * Vertical down scale amount
3214 * Return value is provided in 16.16 fixed point form to retain fractional part.
3215 * Caller should take care of dividing & rounding off the value.
3218 skl_plane_downscale_amount(const struct intel_plane_state *pstate)
3220 uint32_t downscale_h, downscale_w;
3221 uint32_t src_w, src_h, dst_w, dst_h;
3223 if (WARN_ON(!pstate->base.visible))
3224 return DRM_PLANE_HELPER_NO_SCALING;
3226 /* n.b., src is 16.16 fixed point, dst is whole integer */
3227 src_w = drm_rect_width(&pstate->base.src);
3228 src_h = drm_rect_height(&pstate->base.src);
3229 dst_w = drm_rect_width(&pstate->base.dst);
3230 dst_h = drm_rect_height(&pstate->base.dst);
3231 if (drm_rotation_90_or_270(pstate->base.rotation))
3232 swap(dst_w, dst_h);
3234 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3235 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3237 /* Provide result in 16.16 fixed point */
3238 return (uint64_t)downscale_w * downscale_h >> 16;
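/*
 * Worked example (illustrative, not part of the original source):
 * a 3840x2160 source (src stored as 3840 << 16) scanned out at
 * 1920x1080 gives downscale_w = downscale_h = 2 << 16, and the
 * function returns (2 << 16) * (2 << 16) >> 16 = 4 << 16,
 * i.e. 4.0 in 16.16 fixed point.
 */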
3242 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3243 const struct drm_plane_state *pstate,
3246 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3247 struct drm_framebuffer *fb = pstate->fb;
3248 uint32_t down_scale_amount, data_rate;
3249 uint32_t width = 0, height = 0;
3250 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
3252 if (!intel_pstate->base.visible)
3253 return 0;
3254 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
3255 return 0;
3256 if (y && format != DRM_FORMAT_NV12)
3257 return 0;
3259 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3260 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3262 if (drm_rotation_90_or_270(pstate->rotation))
3263 swap(width, height);
3265 /* for planar format */
3266 if (format == DRM_FORMAT_NV12) {
3267 if (y) /* y-plane data rate */
3268 data_rate = width * height *
3269 drm_format_plane_cpp(format, 0);
3270 else /* uv-plane data rate */
3271 data_rate = (width / 2) * (height / 2) *
3272 drm_format_plane_cpp(format, 1);
3273 } else {
3274 /* for packed formats */
3275 data_rate = width * height * drm_format_plane_cpp(format, 0);
3276 }
3278 down_scale_amount = skl_plane_downscale_amount(intel_pstate);
3280 return (uint64_t)data_rate * down_scale_amount >> 16;
3284 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3285 * a 8192x4096@32bpp framebuffer:
3286 * 3 * 4096 * 8192 * 4 < 2^32
3287 */
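/*
 * Checking that bound (illustrative, not part of the original source):
 * 3 * 8192 * 4096 * 4 = 402,653,184, well below 2^32 = 4,294,967,296,
 * so the unsigned accumulation below cannot wrap.
 */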
3289 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
3290 unsigned *plane_data_rate,
3291 unsigned *plane_y_data_rate)
3293 struct drm_crtc_state *cstate = &intel_cstate->base;
3294 struct drm_atomic_state *state = cstate->state;
3295 struct drm_plane *plane;
3296 const struct intel_plane *intel_plane;
3297 const struct drm_plane_state *pstate;
3298 unsigned int rate, total_data_rate = 0;
3301 if (WARN_ON(!state))
3302 return 0;
3304 /* Calculate and cache data rate for each plane */
3305 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
3306 id = skl_wm_plane_id(to_intel_plane(plane));
3307 intel_plane = to_intel_plane(plane);
3309 /* packed/uv */
3310 rate = skl_plane_relative_data_rate(intel_cstate,
3311 pstate, 0);
3312 plane_data_rate[id] = rate;
3314 total_data_rate += rate;
3316 /* y-plane */
3317 rate = skl_plane_relative_data_rate(intel_cstate,
3318 pstate, 1);
3319 plane_y_data_rate[id] = rate;
3321 total_data_rate += rate;
3322 }
3324 return total_data_rate;
3328 skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3329 const int y)
3331 struct drm_framebuffer *fb = pstate->fb;
3332 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3333 uint32_t src_w, src_h;
3334 uint32_t min_scanlines = 8;
3335 uint8_t plane_bpp;
3337 if (WARN_ON(!fb))
3338 return 0;
3340 /* For packed formats, no y-plane, return 0 */
3341 if (y && fb->pixel_format != DRM_FORMAT_NV12)
3342 return 0;
3344 /* For Non Y-tile return 8-blocks */
3345 if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
3346 fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
3347 return 8;
3349 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
3350 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
3352 if (drm_rotation_90_or_270(pstate->rotation))
3353 swap(src_w, src_h);
3355 /* Halve UV plane width and height for NV12 */
3356 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
3357 src_w /= 2;
3358 src_h /= 2;
3359 }
3361 if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3362 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3364 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3366 if (drm_rotation_90_or_270(pstate->rotation)) {
3367 switch (plane_bpp) {
3368 case 1:
3369 min_scanlines = 16;
3370 break;
3371 case 2:
3372 min_scanlines = 8;
3373 break;
3374 case 4:
3375 min_scanlines = 4;
3376 break;
3377 default:
3381 WARN(1, "Unsupported pixel depth %u for rotation",
3382 plane_bpp);
3383 min_scanlines = 4;
3384 }
3385 }
3387 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
3391 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
3392 uint16_t *minimum, uint16_t *y_minimum)
3394 const struct drm_plane_state *pstate;
3395 struct drm_plane *plane;
3397 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
3398 struct intel_plane *intel_plane = to_intel_plane(plane);
3399 int id = skl_wm_plane_id(intel_plane);
3401 if (id == PLANE_CURSOR)
3404 if (!pstate->visible)
3407 minimum[id] = skl_ddb_min_alloc(pstate, 0);
3408 y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
3411 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
3415 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3416 struct skl_ddb_allocation *ddb /* out */)
3418 struct drm_atomic_state *state = cstate->base.state;
3419 struct drm_crtc *crtc = cstate->base.crtc;
3420 struct drm_device *dev = crtc->dev;
3421 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3422 enum pipe pipe = intel_crtc->pipe;
3423 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
3424 uint16_t alloc_size, start;
3425 uint16_t minimum[I915_MAX_PLANES] = {};
3426 uint16_t y_minimum[I915_MAX_PLANES] = {};
3427 unsigned int total_data_rate;
3430 unsigned plane_data_rate[I915_MAX_PLANES] = {};
3431 unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
3433 /* Clear the partitioning for disabled planes. */
3434 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3435 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3437 if (WARN_ON(!state))
3440 if (!cstate->base.active) {
3441 alloc->start = alloc->end = 0;
3445 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3446 alloc_size = skl_ddb_entry_size(alloc);
3447 if (alloc_size == 0) {
3448 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3452 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
3455 * 1. Allocate the minimum required blocks for each active plane
3456 * and allocate the cursor, it doesn't require extra allocation
3457 * proportional to the data rate.
3460 for (i = 0; i < I915_MAX_PLANES; i++) {
3461 alloc_size -= minimum[i];
3462 alloc_size -= y_minimum[i];
3465 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
3466 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3469 * 2. Distribute the remaining space in proportion to the amount of
3470 * data each plane needs to fetch from memory.
3472 * FIXME: we may not allocate every single block here.
3474 total_data_rate = skl_get_total_relative_data_rate(cstate,
3477 if (total_data_rate == 0)
3480 start = alloc->start;
3481 for (id = 0; id < I915_MAX_PLANES; id++) {
3482 unsigned int data_rate, y_data_rate;
3483 uint16_t plane_blocks, y_plane_blocks = 0;
3485 if (id == PLANE_CURSOR)
3488 data_rate = plane_data_rate[id];
3491 * allocation for (packed formats) or (uv-plane part of planar format):
3492 * promote the expression to 64 bits to avoid overflowing, the
3493 * result is < available as data_rate / total_data_rate < 1
3495 plane_blocks = minimum[id];
3496 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3499 /* Leave disabled planes at (0,0) */
3501 ddb->plane[pipe][id].start = start;
3502 ddb->plane[pipe][id].end = start + plane_blocks;
3505 start += plane_blocks;
3508 * allocation for y_plane part of planar format:
3510 y_data_rate = plane_y_data_rate[id];
3512 y_plane_blocks = y_minimum[id];
3513 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3517 ddb->y_plane[pipe][id].start = start;
3518 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3521 start += y_plane_blocks;
3528 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3529 * for the read latency) and cpp should always be <= 8, so that
3530 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3531 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3533 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
3535 uint32_t wm_intermediate_val, ret;
3537 if (latency == 0)
3538 return UINT_MAX;
3540 wm_intermediate_val = latency * pixel_rate * cpp / 512;
3541 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3543 return ret;
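/*
 * Worked example (illustrative, not part of the original source):
 * pixel_rate = 148500 kHz, cpp = 4 and latency = 15 us give
 * 15 * 148500 * 4 / 512 = 17402, and DIV_ROUND_UP(17402, 1000) = 18,
 * i.e. roughly 17.4 blocks of 512 bytes rounded up to 18.
 */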
3546 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3547 uint32_t latency, uint32_t plane_blocks_per_line)
3549 uint32_t ret;
3550 uint32_t wm_intermediate_val;
3552 if (latency == 0)
3553 return UINT_MAX;
3555 wm_intermediate_val = latency * pixel_rate;
3556 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
3557 plane_blocks_per_line;
3559 return ret;
3562 static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3563 struct intel_plane_state *pstate)
3565 uint64_t adjusted_pixel_rate;
3566 uint64_t downscale_amount;
3567 uint64_t pixel_rate;
3569 /* Shouldn't reach here on disabled planes... */
3570 if (WARN_ON(!pstate->base.visible))
3571 return 0;
3574 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3575 * with additional adjustments for plane-specific scaling.
3577 adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
3578 downscale_amount = skl_plane_downscale_amount(pstate);
3580 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3581 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3583 return pixel_rate;
3586 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3587 struct intel_crtc_state *cstate,
3588 struct intel_plane_state *intel_pstate,
3589 uint16_t ddb_allocation,
3591 uint16_t *out_blocks, /* out */
3592 uint8_t *out_lines, /* out */
3593 bool *enabled /* out */)
3595 struct drm_plane_state *pstate = &intel_pstate->base;
3596 struct drm_framebuffer *fb = pstate->fb;
3597 uint32_t latency = dev_priv->wm.skl_latency[level];
3598 uint32_t method1, method2;
3599 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3600 uint32_t res_blocks, res_lines;
3601 uint32_t selected_result;
3603 uint32_t width = 0, height = 0;
3604 uint32_t plane_pixel_rate;
3605 uint32_t y_tile_minimum, y_min_scanlines;
3606 struct intel_atomic_state *state =
3607 to_intel_atomic_state(cstate->base.state);
3608 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
3610 if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
3611 *enabled = false;
3612 return 0;
3613 }
3615 if (apply_memory_bw_wa && fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
3616 latency += 15;
3618 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3619 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3621 if (drm_rotation_90_or_270(pstate->rotation))
3622 swap(width, height);
3624 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3625 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3627 if (drm_rotation_90_or_270(pstate->rotation)) {
3628 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3629 drm_format_plane_cpp(fb->pixel_format, 1) :
3630 drm_format_plane_cpp(fb->pixel_format, 0);
3632 switch (cpp) {
3633 case 1:
3634 y_min_scanlines = 16;
3635 break;
3636 case 2:
3637 y_min_scanlines = 8;
3638 break;
3639 case 4:
3640 y_min_scanlines = 4;
3641 break;
3642 default:
3643 MISSING_CASE(cpp);
3644 return -EINVAL;
3645 }
3646 } else {
3647 y_min_scanlines = 4;
3648 }
3650 plane_bytes_per_line = width * cpp;
3651 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3652 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3653 plane_blocks_per_line =
3654 DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
3655 plane_blocks_per_line /= y_min_scanlines;
3656 } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
3657 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
3658 + 1;
3659 } else {
3660 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3661 }
3663 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3664 method2 = skl_wm_method2(plane_pixel_rate,
3665 cstate->base.adjusted_mode.crtc_htotal,
3666 latency,
3667 plane_blocks_per_line);
3669 y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
3670 if (apply_memory_bw_wa)
3671 y_tile_minimum *= 2;
3673 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3674 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3675 selected_result = max(method2, y_tile_minimum);
3676 } else {
3677 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
3678 (plane_bytes_per_line / 512 < 1))
3679 selected_result = method2;
3680 else if ((ddb_allocation / plane_blocks_per_line) >= 1)
3681 selected_result = min(method1, method2);
3682 else
3683 selected_result = method1;
3684 }
3686 res_blocks = selected_result + 1;
3687 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3689 if (level >= 1 && level <= 7) {
3690 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3691 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3692 res_blocks += y_tile_minimum;
3693 res_lines += y_min_scanlines;
3694 } else {
3695 res_blocks++;
3696 }
3697 }
3699 if (res_blocks >= ddb_allocation || res_lines > 31) {
3700 *enabled = false;
3702 /*
3703 * If there are no valid level 0 watermarks, then we can't
3704 * support this display configuration.
3705 */
3706 if (level) {
3707 return 0;
3708 } else {
3709 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3710 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3711 to_intel_crtc(cstate->base.crtc)->pipe,
3712 skl_wm_plane_id(to_intel_plane(pstate->plane)),
3713 res_blocks, ddb_allocation, res_lines);
3714 return -EINVAL;
3715 }
3716 }
3719 *out_blocks = res_blocks;
3720 *out_lines = res_lines;
3721 *enabled = true;
3723 return 0;
3727 skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3728 struct skl_ddb_allocation *ddb,
3729 struct intel_crtc_state *cstate,
3730 struct intel_plane *intel_plane,
3732 struct skl_wm_level *result)
3734 struct drm_atomic_state *state = cstate->base.state;
3735 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3736 struct drm_plane *plane = &intel_plane->base;
3737 struct intel_plane_state *intel_pstate = NULL;
3738 uint16_t ddb_blocks;
3739 enum pipe pipe = intel_crtc->pipe;
3740 int ret;
3741 int i = skl_wm_plane_id(intel_plane);
3743 if (state)
3744 intel_pstate =
3745 intel_atomic_get_existing_plane_state(state,
3746 intel_plane);
3749 * Note: If we start supporting multiple pending atomic commits against
3750 * the same planes/CRTC's in the future, plane->state will no longer be
3751 * the correct pre-state to use for the calculations here and we'll
3752 * need to change where we get the 'unchanged' plane data from.
3754 * For now this is fine because we only allow one queued commit against
3755 * a CRTC. Even if the plane isn't modified by this transaction and we
3756 * don't have a plane lock, we still have the CRTC's lock, so we know
3757 * that no other transactions are racing with us to update it.
3759 if (!intel_pstate)
3760 intel_pstate = to_intel_plane_state(plane->state);
3762 WARN_ON(!intel_pstate->base.fb);
3764 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3766 ret = skl_compute_plane_wm(dev_priv,
3767 cstate,
3768 intel_pstate,
3769 ddb_blocks,
3770 level,
3771 &result->plane_res_b,
3772 &result->plane_res_l,
3773 &result->plane_en);
3774 if (ret)
3775 return ret;
3777 return 0;
3781 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3783 uint32_t pixel_rate;
3785 if (!cstate->base.active)
3786 return 0;
3788 pixel_rate = ilk_pipe_pixel_rate(cstate);
3790 if (WARN_ON(pixel_rate == 0))
3791 return 0;
3793 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3794 pixel_rate);
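/*
 * Illustrative example (not part of the original source): with
 * crtc_htotal = 2200 and pixel_rate = 148500 kHz this returns
 * DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, the line time in
 * 0.125 us units, matching the HSW linetime computation above.
 */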
3797 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3798 struct skl_wm_level *trans_wm /* out */)
3800 if (!cstate->base.active)
3801 return;
3803 /* Until we know more, just disable transition WMs */
3804 trans_wm->plane_en = false;
3807 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3808 struct skl_ddb_allocation *ddb,
3809 struct skl_pipe_wm *pipe_wm)
3811 struct drm_device *dev = cstate->base.crtc->dev;
3812 const struct drm_i915_private *dev_priv = to_i915(dev);
3813 struct intel_plane *intel_plane;
3814 struct skl_plane_wm *wm;
3815 int level, max_level = ilk_wm_max_level(dev_priv);
3819 * We'll only calculate watermarks for planes that are actually
3820 * enabled, so make sure all other planes are set as disabled.
3822 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
3824 for_each_intel_plane_mask(&dev_priv->drm,
3825 intel_plane,
3826 cstate->base.plane_mask) {
3827 wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
3829 for (level = 0; level <= max_level; level++) {
3830 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
3831 intel_plane, level,
3832 &wm->wm[level]);
3833 if (ret)
3834 return ret;
3835 }
3836 skl_compute_transition_wm(cstate, &wm->trans_wm);
3838 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3840 return 0;
3843 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3844 i915_reg_t reg,
3845 const struct skl_ddb_entry *entry)
3847 if (entry->end)
3848 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3849 else
3850 I915_WRITE(reg, 0);
3853 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
3854 i915_reg_t reg,
3855 const struct skl_wm_level *level)
3857 uint32_t val = 0;
3859 if (level->plane_en) {
3860 val |= PLANE_WM_EN;
3861 val |= level->plane_res_b;
3862 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
3863 }
3865 I915_WRITE(reg, val);
3868 void skl_write_plane_wm(struct intel_crtc *intel_crtc,
3869 const struct skl_plane_wm *wm,
3870 const struct skl_ddb_allocation *ddb,
3873 struct drm_crtc *crtc = &intel_crtc->base;
3874 struct drm_device *dev = crtc->dev;
3875 struct drm_i915_private *dev_priv = to_i915(dev);
3876 int level, max_level = ilk_wm_max_level(dev_priv);
3877 enum pipe pipe = intel_crtc->pipe;
3879 for (level = 0; level <= max_level; level++) {
3880 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
3883 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
3886 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
3887 &ddb->plane[pipe][plane]);
3888 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
3889 &ddb->y_plane[pipe][plane]);
3892 void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
3893 const struct skl_plane_wm *wm,
3894 const struct skl_ddb_allocation *ddb)
3896 struct drm_crtc *crtc = &intel_crtc->base;
3897 struct drm_device *dev = crtc->dev;
3898 struct drm_i915_private *dev_priv = to_i915(dev);
3899 int level, max_level = ilk_wm_max_level(dev_priv);
3900 enum pipe pipe = intel_crtc->pipe;
3902 for (level = 0; level <= max_level; level++) {
3903 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
3906 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
3908 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3909 &ddb->plane[pipe][PLANE_CURSOR]);
3912 bool skl_wm_level_equals(const struct skl_wm_level *l1,
3913 const struct skl_wm_level *l2)
3915 if (l1->plane_en != l2->plane_en)
3916 return false;
3918 /* If both planes aren't enabled, the rest shouldn't matter */
3919 if (!l1->plane_en)
3920 return true;
3922 return (l1->plane_res_l == l2->plane_res_l &&
3923 l1->plane_res_b == l2->plane_res_b);
3926 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
3927 const struct skl_ddb_entry *b)
3929 return a->start < b->end && b->start < a->end;
3932 bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
3933 struct intel_crtc *intel_crtc)
3935 struct drm_crtc *other_crtc;
3936 struct drm_crtc_state *other_cstate;
3937 struct intel_crtc *other_intel_crtc;
3938 const struct skl_ddb_entry *ddb =
3939 &to_intel_crtc_state(intel_crtc->base.state)->wm.skl.ddb;
3942 for_each_crtc_in_state(state, other_crtc, other_cstate, i) {
3943 other_intel_crtc = to_intel_crtc(other_crtc);
3945 if (other_intel_crtc == intel_crtc)
3946 continue;
3948 if (skl_ddb_entries_overlap(ddb, &other_intel_crtc->hw_ddb))
3949 return true;
3950 }
3952 return false;
3955 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3956 const struct skl_pipe_wm *old_pipe_wm,
3957 struct skl_pipe_wm *pipe_wm, /* out */
3958 struct skl_ddb_allocation *ddb, /* out */
3959 bool *changed /* out */)
3961 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
3962 int ret;
3964 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
3965 if (ret)
3966 return ret;
3968 if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
3969 *changed = false;
3970 else
3971 *changed = true;
3973 return 0;
3977 pipes_modified(struct drm_atomic_state *state)
3979 struct drm_crtc *crtc;
3980 struct drm_crtc_state *cstate;
3981 uint32_t i, ret = 0;
3983 for_each_crtc_in_state(state, crtc, cstate, i)
3984 ret |= drm_crtc_mask(crtc);
3986 return ret;
3990 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
3992 struct drm_atomic_state *state = cstate->base.state;
3993 struct drm_device *dev = state->dev;
3994 struct drm_crtc *crtc = cstate->base.crtc;
3995 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3996 struct drm_i915_private *dev_priv = to_i915(dev);
3997 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3998 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
3999 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
4000 struct drm_plane_state *plane_state;
4001 struct drm_plane *plane;
4002 enum pipe pipe = intel_crtc->pipe;
4005 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
4007 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
4008 id = skl_wm_plane_id(to_intel_plane(plane));
4010 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
4011 &new_ddb->plane[pipe][id]) &&
4012 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
4013 &new_ddb->y_plane[pipe][id]))
4014 continue;
4016 plane_state = drm_atomic_get_plane_state(state, plane);
4017 if (IS_ERR(plane_state))
4018 return PTR_ERR(plane_state);
4019 }
4021 return 0;
4025 skl_compute_ddb(struct drm_atomic_state *state)
4027 struct drm_device *dev = state->dev;
4028 struct drm_i915_private *dev_priv = to_i915(dev);
4029 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4030 struct intel_crtc *intel_crtc;
4031 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
4032 uint32_t realloc_pipes = pipes_modified(state);
4036 * If this is our first atomic update following hardware readout,
4037 * we can't trust the DDB that the BIOS programmed for us. Let's
4038 * pretend that all pipes switched active status so that we'll
4039 * ensure a full DDB recompute.
4041 if (dev_priv->wm.distrust_bios_wm) {
4042 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4043 state->acquire_ctx);
4044 if (ret)
4045 return ret;
4047 intel_state->active_pipe_changes = ~0;
4050 * We usually only initialize intel_state->active_crtcs if we
4051 * we're doing a modeset; make sure this field is always
4052 * initialized during the sanitization process that happens
4053 * on the first commit too.
4055 if (!intel_state->modeset)
4056 intel_state->active_crtcs = dev_priv->active_crtcs;
4060 * If the modeset changes which CRTC's are active, we need to
4061 * recompute the DDB allocation for *all* active pipes, even
4062 * those that weren't otherwise being modified in any way by this
4063 * atomic commit. Due to the shrinking of the per-pipe allocations
4064 * when new active CRTC's are added, it's possible for a pipe that
4065 * we were already using and aren't changing at all here to suddenly
4066 * become invalid if its DDB needs exceeds its new allocation.
4068 * Note that if we wind up doing a full DDB recompute, we can't let
4069 * any other display updates race with this transaction, so we need
4070 * to grab the lock on *all* CRTC's.
4072 if (intel_state->active_pipe_changes) {
4073 realloc_pipes = ~0;
4074 intel_state->wm_results.dirty_pipes = ~0;
4078 * We're not recomputing for the pipes not included in the commit, so
4079 * make sure we start with the current state.
4081 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
4083 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
4084 struct intel_crtc_state *cstate;
4086 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
4087 if (IS_ERR(cstate))
4088 return PTR_ERR(cstate);
4090 ret = skl_allocate_pipe_ddb(cstate, ddb);
4091 if (ret)
4092 return ret;
4094 ret = skl_ddb_add_affected_planes(cstate);
4095 if (ret)
4096 return ret;
4097 }
4099 return 0;
4103 skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4104 struct skl_wm_values *src,
4107 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4108 sizeof(dst->ddb.y_plane[pipe]));
4109 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4110 sizeof(dst->ddb.plane[pipe]));
4114 skl_print_wm_changes(const struct drm_atomic_state *state)
4116 const struct drm_device *dev = state->dev;
4117 const struct drm_i915_private *dev_priv = to_i915(dev);
4118 const struct intel_atomic_state *intel_state =
4119 to_intel_atomic_state(state);
4120 const struct drm_crtc *crtc;
4121 const struct drm_crtc_state *cstate;
4122 const struct intel_plane *intel_plane;
4123 const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
4124 const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
4128 for_each_crtc_in_state(state, crtc, cstate, i) {
4129 const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4130 enum pipe pipe = intel_crtc->pipe;
4132 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
4133 const struct skl_ddb_entry *old, *new;
4135 id = skl_wm_plane_id(intel_plane);
4136 old = &old_ddb->plane[pipe][id];
4137 new = &new_ddb->plane[pipe][id];
4139 if (skl_ddb_entry_equal(old, new))
4142 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
4143 intel_plane->base.base.id,
4144 intel_plane->base.name,
4145 old->start, old->end,
4146 new->start, new->end);
4152 skl_compute_wm(struct drm_atomic_state *state)
4154 struct drm_crtc *crtc;
4155 struct drm_crtc_state *cstate;
4156 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4157 struct skl_wm_values *results = &intel_state->wm_results;
4158 struct skl_pipe_wm *pipe_wm;
4159 bool changed = false;
4160 int ret, i;
4163 * If this transaction isn't actually touching any CRTC's, don't
4164 * bother with watermark calculation. Note that if we pass this
4165 * test, we're guaranteed to hold at least one CRTC state mutex,
4166 * which means we can safely use values like dev_priv->active_crtcs
4167 * since any racing commits that want to update them would need to
4168 * hold _all_ CRTC state mutexes.
4170 for_each_crtc_in_state(state, crtc, cstate, i)
4171 changed = true;
4172 if (!changed)
4173 return 0;
4175 /* Clear all dirty flags */
4176 results->dirty_pipes = 0;
4178 ret = skl_compute_ddb(state);
4179 if (ret)
4180 return ret;
4183 * Calculate WM's for all pipes that are part of this transaction.
4184 * Note that the DDB allocation above may have added more CRTC's that
4185 * weren't otherwise being modified (and set bits in dirty_pipes) if
4186 * pipe allocations had to change.
4188 * FIXME: Now that we're doing this in the atomic check phase, we
4189 * should allow skl_update_pipe_wm() to return failure in cases where
4190 * no suitable watermark values can be found.
4192 for_each_crtc_in_state(state, crtc, cstate, i) {
4193 struct intel_crtc_state *intel_cstate =
4194 to_intel_crtc_state(cstate);
4195 const struct skl_pipe_wm *old_pipe_wm =
4196 &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
4198 pipe_wm = &intel_cstate->wm.skl.optimal;
4199 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
4200 &results->ddb, &changed);
4201 if (ret)
4202 return ret;
4204 if (changed)
4205 results->dirty_pipes |= drm_crtc_mask(crtc);
4207 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4208 /* This pipe's WM's did not change */
4209 continue;
4211 intel_cstate->update_wm_pre = true;
4214 skl_print_wm_changes(state);
4216 return 0;
static void skl_update_wm(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
	struct intel_crtc_state *cstate = to_intel_crtc_state(intel_crtc->base.state);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	enum pipe pipe = intel_crtc->pipe;

	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	/*
	 * If this pipe isn't active already, we're going to be enabling it
	 * very soon. Since it's safe to update a pipe's ddb allocation while
	 * the pipe's shut off, just do so here. Already active pipes will have
	 * their watermarks updated once we update their planes.
	 */
	if (intel_crtc->base.state->active_changed) {
		int plane;

		for_each_universal_plane(dev_priv, pipe, plane)
			skl_write_plane_wm(intel_crtc, &pipe_wm->planes[plane],
					   &results->ddb, plane);

		skl_write_cursor_wm(intel_crtc, &pipe_wm->planes[PLANE_CURSOR],
				    &results->ddb);
	}

	skl_copy_wm_for_pipe(hw_vals, results, pipe);

	intel_crtc->hw_ddb = cstate->wm.skl.ddb;

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static inline void skl_wm_level_from_reg_val(uint32_t val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
			     PLANE_WM_LINES_MASK;
}
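/*
 * Illustrative decode, not driver code: assuming PLANE_WM_EN is bit 31
 * and the blocks/lines fields sit below/above PLANE_WM_LINES_SHIFT as
 * the masks above describe, a value such as
 *
 *	val = (1 << 31) | (4 << PLANE_WM_LINES_SHIFT) | 0x50;
 *
 * would decode via the helper to plane_en = true, plane_res_b = 0x50
 * blocks and plane_res_l = 4 lines.
 */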
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct skl_plane_wm *wm;
	enum pipe pipe = intel_crtc->pipe;
	int level, id, max_level;
	uint32_t val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		id = skl_wm_plane_id(intel_plane);
		wm = &out->planes[id];

		for (level = 0; level <= max_level; level++) {
			if (id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	if (!intel_crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *cstate;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		cstate = to_intel_crtc_state(crtc->state);

		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);

		if (intel_crtc->active)
			hw->dirty_pipes |= drm_crtc_mask(crtc);
	}

	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	} else {
		/* Easy/common case; just sanitize DDB now if everything off */
		memset(ddb, 0, sizeof(*ddb));
	}
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
	enum pipe pipe = intel_crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	intel_crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
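/*
 * Expansion sketch for the token-pasting macros above:
 * _FW_WM(tmp, CURSORB) becomes
 * ((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT,
 * i.e. each call extracts one watermark field from a packed DSPFW
 * register value.
 */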
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].primary =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].cursor =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV
void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_plane *plane;
	enum pipe pipe;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
			int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
			break;
		}
	}

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_pipe(dev_priv, pipe)
		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
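/*
 * Worked example of the formulas above, with illustrative numbers only:
 * for a 1920x1080 mode with a 148500 kHz dotclock, htotal = 2200 and
 * 4 bytes per pixel,
 *
 *	line time = htotal / dotclock = 2200 / 148500 kHz ~= 14.8 us
 *
 * so with an assumed SR latency of 30 us the SR watermark would be
 *
 *	(trunc(30 / 14.8) + 1) * 1920 * 4 = 3 * 1920 * 4 = 23040 bytes
 *
 * before the final round-up and the extra 2 entries for clock crossings.
 */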
/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev_priv, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}
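/*
 * Packing sketch using the shifts from the function above (values are
 * made up): with max_freq_softlimit = 0x10 and min_freq_softlimit = 0x5,
 * requesting val = 0x5 on a pre-gen9 part yields
 * (0x10 << 24) | (0x5 << 16); on gen9 the same request packs as
 * (0x10 << 23) | (0x5 << 14).
 */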
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 &&
		    val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq &&
		    val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq &&
			 val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
		    val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_up * threshold_up / 100));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_down * threshold_down / 100));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}
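/*
 * Unit-conversion sketch for the writes above: GT_INTERVAL_FROM_US()
 * converts microseconds into the hardware's evaluation-interval ticks
 * (~1280ns each on the parts this path assumes). The LOW_POWER ei_up of
 * 16000us is then 16000000 / 1280 = 12500 ticks, and its 95% up
 * threshold corresponds to 16000 * 95 / 100 = 15200us of busy time.
 */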
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
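/*
 * Consequence of the masking above, spelled out: at the minimum
 * softlimit only the "up" events stay unmasked (a down interrupt could
 * not be acted on anyway), at the maximum only the "down" events do,
 * and anywhere in between both directions may interrupt us.
 */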
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		return;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (!IS_CHERRYVIEW(dev_priv))
			gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* Wake up the media well, as that takes a lot less
	 * power than the Render well. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	valleyview_set_rps(dev_priv, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));

		gen6_enable_rps_interrupts(dev_priv);

		/* Ensure we start at the user's desired frequency */
		intel_set_rps(dev_priv,
			      clamp(dev_priv->rps.cur_freq,
				    dev_priv->rps.min_freq_softlimit,
				    dev_priv->rps.max_freq_softlimit));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	/* Flush our bottom-half so that it does not race with us
	 * setting the idle frequency and so that it is bounded by
	 * our rpm wakeref. And then disable the interrupts to stop any
	 * further RPS reclocking whilst we are asleep.
	 */
	gen6_disable_rps_interrupts(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->gt.awake &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}
void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		valleyview_set_rps(dev_priv, val);
	else
		gen6_set_rps(dev_priv, val);
}

static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
	/* We're doing forcewake before disabling RC6;
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev_priv))
		DRM_DEBUG_DRIVER("Enabling RC6 states: "
				 "RC6 %s RC6p %s RC6pp %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
	else
		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;
	u32 rc_ctl;
	int rc_sw_target;

	rc_ctl = I915_READ(GEN6_RC_CONTROL);
	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
		       RC_SW_TARGET_STATE_SHIFT;
	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
			 rc_sw_target);

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
					ggtt->stolen_reserved_size))) {
		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN6_GFXPAUSE)) {
		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_MISC_CTRL0)) {
		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev_priv)->gen < 6)
		return 0;

	if (!enable_rc6)
		return 0;

	if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		return 0;
	}

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev_priv))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
					 "(requested %d, valid %d)\n",
					 enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev_priv))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}
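/*
 * Mask arithmetic example with hypothetical module-parameter values:
 * enable_rc6 = 7 (RC6 | RC6p | RC6pp) on hardware without RC6p support
 * gives mask = INTEL_RC6_ENABLE, so the function logs the adjustment
 * and returns 7 & 1 = 1, i.e. plain RC6 only.
 */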
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_BROXTON(dev_priv)) {
		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
	} else {
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(dev_priv,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status) == 0)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}
}
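/*
 * Unit example (illustrative fuse value): an RP0 field of 22 means
 * 22 * 50 = 1100 MHz in the pre-gen9 encoding; after the SKL/KBL
 * rescale by GEN9_FREQ_SCALER the same cap is stored as 66 in
 * 16.66 MHz units, which still decodes to roughly 1100 MHz.
 */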
static void reset_rps(struct drm_i915_private *dev_priv,
		      void (*set)(struct drm_i915_private *, u8))
{
	u8 freq = dev_priv->rps.cur_freq;

	/* force a reset */
	dev_priv->rps.power = -1;
	dev_priv->rps.cur_freq = -1;

	set(dev_priv, freq);
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		/*
		 * BIOS could leave the Hw Turbo enabled, so need to explicitly
		 * clear out the Control register just to avoid inconsistency
		 * with debugfs interface, which will show Turbo as enabled
		 * only and that is not expected by the User after adding the
		 * WaGsvDisableTurbo. Apart from this there is no problem even
		 * if the Turbo is left enabled in the Control register, as the
		 * Up/Down interrupts would remain masked.
		 */
		gen9_disable_rps(dev_priv);
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		return;
	}

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		   GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */

	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
	if (IS_SKYLAKE(dev_priv))
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	else
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC(dev_priv))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
	/* WaRsUseTimeoutMode:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	} else {
		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);
	}

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
				(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev_priv, rc6_mask);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6();
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev_priv)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev_priv, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	reset_rps(dev_priv, gen6_set_rps);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev_priv) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev_priv)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev_priv)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
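/*
 * Worked row of the table above, with hypothetical Haswell numbers:
 * gpu_freq = 20 (20 * 50 = 1000 MHz) gives
 * ring_freq = 20 * 5 / 4 = 25 (1250 MHz), unless the DDR-derived
 * min_ring_freq is higher, and ia_freq is left for cpufreq to choose.
 */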
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}
static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}
static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}
static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory.  For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	i915_gem_object_put(dev_priv->vlv_pctx);
	dev_priv->vlv_pctx = NULL;
}
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	valleyview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);
}
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	cherryview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	/* PUnit validated range is only [RPe, RP0] */
	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");
}
static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	valleyview_cleanup_pctx(dev_priv);
}
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
6092 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
6094 struct intel_engine_cs *engine;
6095 enum intel_engine_id id;
6096 u32 gtfifodbg, val, rc6_mode = 0;
6098 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6100 valleyview_check_pctx(dev_priv);
6102 gtfifodbg = I915_READ(GTFIFODBG);
6104 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
6106 I915_WRITE(GTFIFODBG, gtfifodbg);
6109 /* If VLV, Forcewake all wells, else re-direct to regular path */
6110 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6112 /* Disable RC states. */
6113 I915_WRITE(GEN6_RC_CONTROL, 0);
6115 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6116 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6117 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6118 I915_WRITE(GEN6_RP_UP_EI, 66000);
6119 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6121 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6123 I915_WRITE(GEN6_RP_CONTROL,
6124 GEN6_RP_MEDIA_TURBO |
6125 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6126 GEN6_RP_MEDIA_IS_GFX |
6127 GEN6_RP_ENABLE |
6128 GEN6_RP_UP_BUSY_AVG |
6129 GEN6_RP_DOWN_IDLE_CONT);
6131 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
6132 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6133 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6135 for_each_engine(engine, dev_priv, id)
6136 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6138 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
6140 /* allows RC6 residency counter to work */
6141 I915_WRITE(VLV_COUNTER_CONTROL,
6142 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
6143 VLV_RENDER_RC0_COUNT_EN |
6144 VLV_MEDIA_RC6_COUNT_EN |
6145 VLV_RENDER_RC6_COUNT_EN));
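/*
 * Note: VLV_COUNTER_CONTROL is a masked register; _MASKED_BIT_ENABLE(x)
 * expands to ((x) << 16 | (x)), i.e. it sets the write-enable mask bits in
 * the upper 16 bits along with the bits themselves, leaving all other
 * counter-control bits untouched by this write.
 */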
6147 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6148 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
6150 intel_print_rc6_info(dev_priv, rc6_mode);
6152 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6154 /* Setting Fixed Bias */
6155 val = VLV_OVERRIDE_EN |
6156 VLV_SOC_TDP_EN |
6157 VLV_BIAS_CPU_125_SOC_875;
6158 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6160 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6162 /* RPS code assumes GPLL is used */
6163 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6165 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6166 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6168 reset_rps(dev_priv, valleyview_set_rps);
6170 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6171 }
6173 static unsigned long intel_pxfreq(u32 vidfreq)
6174 {
6175 unsigned long freq;
6176 int div = (vidfreq & 0x3f0000) >> 16;
6177 int post = (vidfreq & 0x3000) >> 12;
6178 int pre = (vidfreq & 0x7);
6180 if (!pre)
6181 return 0;
6183 freq = ((div * 133333) / ((1<<post) * pre));
6185 return freq;
6186 }
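/*
 * Illustrative sketch (not part of the driver): example_pxfreq_decode() is
 * a hypothetical helper that replays the PXVFREQ decoding above for a
 * made-up encoding with div = 12, post = 1 and pre = 3.
 */
static inline unsigned long example_pxfreq_decode(void)
{
	u32 vidfreq = (12 << 16) | (1 << 12) | 3; /* hypothetical value */

	/* (12 * 133333) / ((1 << 1) * 3) == 266666 */
	return intel_pxfreq(vidfreq);
}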
6188 static const struct cparams {
6189 u16 i;
6190 u16 t;
6191 u16 m;
6192 u16 c;
6193 } cparams[] = {
6194 { 1, 1333, 301, 28664 },
6195 { 1, 1066, 294, 24460 },
6196 { 1, 800, 294, 25192 },
6197 { 0, 1333, 276, 27605 },
6198 { 0, 1066, 276, 27605 },
6199 { 0, 800, 231, 23784 },
6200 };
6202 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
6203 {
6204 u64 total_count, diff, ret;
6205 u32 count1, count2, count3, m = 0, c = 0;
6206 unsigned long now = jiffies_to_msecs(jiffies), diff1;
6207 int i;
6209 assert_spin_locked(&mchdev_lock);
6211 diff1 = now - dev_priv->ips.last_time1;
6213 /* Prevent division-by-zero if we are asking too fast.
6214 * Also, we don't get interesting results if we are polling
6215 * faster than once in 10ms, so just return the saved value
6216 * in such cases.
6217 */
6218 if (diff1 <= 10)
6219 return dev_priv->ips.chipset_power;
6221 count1 = I915_READ(DMIEC);
6222 count2 = I915_READ(DDREC);
6223 count3 = I915_READ(CSIEC);
6225 total_count = count1 + count2 + count3;
6227 /* FIXME: handle per-counter overflow */
6228 if (total_count < dev_priv->ips.last_count1) {
6229 diff = ~0UL - dev_priv->ips.last_count1;
6230 diff += total_count;
6231 } else {
6232 diff = total_count - dev_priv->ips.last_count1;
6233 }
6235 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
6236 if (cparams[i].i == dev_priv->ips.c_m &&
6237 cparams[i].t == dev_priv->ips.r_t) {
6238 m = cparams[i].m;
6239 c = cparams[i].c;
6240 break;
6241 }
6242 }
6244 diff = div_u64(diff, diff1);
6245 ret = ((m * diff) + c);
6246 ret = div_u64(ret, 10);
6248 dev_priv->ips.last_count1 = total_count;
6249 dev_priv->ips.last_time1 = now;
6251 dev_priv->ips.chipset_power = ret;
6253 return ret;
6254 }
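/*
 * Illustrative sketch (not part of the driver): __i915_chipset_val() above
 * is a linear model, power = (m * rate + c) / 10, where rate is the summed
 * counter delta per millisecond and (m, c) come from cparams[]. The
 * hypothetical helper below isolates just that arithmetic.
 */
static inline u64 example_chipset_power(u64 count_delta, u32 ms, u32 m, u32 c)
{
	u64 rate = div_u64(count_delta, ms); /* events per millisecond */

	return div_u64(m * rate + c, 10); /* scaled as in the driver */
}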
6256 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
6257 {
6258 unsigned long val;
6260 if (INTEL_INFO(dev_priv)->gen != 5)
6261 return 0;
6263 spin_lock_irq(&mchdev_lock);
6265 val = __i915_chipset_val(dev_priv);
6267 spin_unlock_irq(&mchdev_lock);
6269 return val;
6270 }
6272 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
6273 {
6274 unsigned long m, x, b;
6275 u32 tsfs;
6277 tsfs = I915_READ(TSFS);
6279 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6280 x = I915_READ8(TR1);
6282 b = tsfs & TSFS_INTR_MASK;
6284 return ((m * x) / 127) - b;
6285 }
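/*
 * Worked example (illustrative, made-up register values): with a TSFS
 * slope m = 40, a TR1 reading x = 120 and an intercept b = 5, the formula
 * above yields (40 * 120) / 127 - 5 = 32. The hypothetical helper below
 * merely replays that arithmetic; the driver does not use it.
 */
static inline unsigned long example_mch_val(void)
{
	unsigned long m = 40, x = 120, b = 5; /* hypothetical readings */

	return ((m * x) / 127) - b; /* == 32 */
}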
6287 static int _pxvid_to_vd(u8 pxvid)
6288 {
6289 if (pxvid == 0)
6290 return 0;
6292 if (pxvid >= 8 && pxvid < 31)
6293 pxvid = 31;
6295 return (pxvid + 2) * 125;
6296 }
6298 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
6299 {
6300 const int vd = _pxvid_to_vd(pxvid);
6301 const int vm = vd - 1125;
6303 if (INTEL_INFO(dev_priv)->is_mobile)
6304 return vm > 0 ? vm : 0;
6306 return vd;
6307 }
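/*
 * Worked equation (illustrative): pxvid = 0x14 (20 decimal) decodes as
 * vd = (20 + 2) * 125 = 2750; mobile parts then report
 * vm = 2750 - 1125 = 1625, while non-mobile parts return vd unchanged.
 */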
6309 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
6310 {
6311 u64 now, diff, diffms;
6312 u32 count;
6314 assert_spin_locked(&mchdev_lock);
6316 now = ktime_get_raw_ns();
6317 diffms = now - dev_priv->ips.last_time2;
6318 do_div(diffms, NSEC_PER_MSEC);
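/*
 * Note: do_div() divides its 64-bit first argument in place and evaluates
 * to the remainder, so after the call above diffms holds whole
 * milliseconds rather than nanoseconds.
 */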
6320 /* Don't divide by 0 */
6321 if (!diffms)
6322 return;
6324 count = I915_READ(GFXEC);
6326 if (count < dev_priv->ips.last_count2) {
6327 diff = ~0UL - dev_priv->ips.last_count2;
6328 diff += count;
6329 } else {
6330 diff = count - dev_priv->ips.last_count2;
6331 }
6333 dev_priv->ips.last_count2 = count;
6334 dev_priv->ips.last_time2 = now;
6336 /* More magic constants... */
6337 diff = diff * 1181;
6338 diff = div_u64(diff, diffms * 10);
6339 dev_priv->ips.gfx_power = diff;
6340 }
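/*
 * Worked equation (illustrative): a GFXEC delta of 1000 counts over a
 * 50 ms window gives gfx_power = (1000 * 1181) / (50 * 10) = 2362 in the
 * driver's scaled units.
 */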
6342 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
6343 {
6344 if (INTEL_INFO(dev_priv)->gen != 5)
6345 return;
6347 spin_lock_irq(&mchdev_lock);
6349 __i915_update_gfx_val(dev_priv);
6351 spin_unlock_irq(&mchdev_lock);
6352 }
6354 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
6355 {
6356 unsigned long t, corr, state1, corr2, state2;
6357 u32 pxvid, ext_v;
6359 assert_spin_locked(&mchdev_lock);
6361 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
6362 pxvid = (pxvid >> 24) & 0x7f;
6363 ext_v = pvid_to_extvid(dev_priv, pxvid);
6365 state1 = ext_v;
6367 t = i915_mch_val(dev_priv);
6369 /* Revel in the empirically derived constants */
6371 /* Correction factor in 1/100000 units */
6372 if (t > 80)
6373 corr = ((t * 2349) + 135940);
6374 else if (t >= 50)
6375 corr = ((t * 964) + 29317);
6376 else /* < 50 */
6377 corr = ((t * 301) + 1004);
6379 corr = corr * ((150142 * state1) / 10000 - 78642);
6380 corr /= 100000;
6381 corr2 = (corr * dev_priv->ips.corr);
6383 state2 = (corr2 * state1) / 10000;
6384 state2 /= 100; /* convert to mW */
6386 __i915_update_gfx_val(dev_priv);
6388 return dev_priv->ips.gfx_power + state2;
6389 }
6391 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
6392 {
6393 unsigned long val;
6395 if (INTEL_INFO(dev_priv)->gen != 5)
6396 return 0;
6398 spin_lock_irq(&mchdev_lock);
6400 val = __i915_gfx_val(dev_priv);
6402 spin_unlock_irq(&mchdev_lock);
6404 return val;
6405 }
6407 /**
6408 * i915_read_mch_val - return value for IPS use
6410 * Calculate and return a value for the IPS driver to use when deciding whether
6411 * we have thermal and power headroom to increase CPU or GPU power budget.
6412 */
6413 unsigned long i915_read_mch_val(void)
6414 {
6415 struct drm_i915_private *dev_priv;
6416 unsigned long chipset_val, graphics_val, ret = 0;
6418 spin_lock_irq(&mchdev_lock);
6419 if (!i915_mch_dev)
6420 goto out_unlock;
6421 dev_priv = i915_mch_dev;
6423 chipset_val = __i915_chipset_val(dev_priv);
6424 graphics_val = __i915_gfx_val(dev_priv);
6426 ret = chipset_val + graphics_val;
6428 out_unlock:
6429 spin_unlock_irq(&mchdev_lock);
6431 return ret;
6432 }
6433 EXPORT_SYMBOL_GPL(i915_read_mch_val);
6435 /**
6436 * i915_gpu_raise - raise GPU frequency limit
6437 *
6438 * Raise the limit; IPS indicates we have thermal headroom.
6439 */
6440 bool i915_gpu_raise(void)
6441 {
6442 struct drm_i915_private *dev_priv;
6443 bool ret = true;
6445 spin_lock_irq(&mchdev_lock);
6446 if (!i915_mch_dev) {
6447 ret = false;
6448 goto out_unlock;
6449 }
6450 dev_priv = i915_mch_dev;
6452 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
6453 dev_priv->ips.max_delay--;
6455 out_unlock:
6456 spin_unlock_irq(&mchdev_lock);
6458 return ret;
6459 }
6460 EXPORT_SYMBOL_GPL(i915_gpu_raise);
6462 /**
6463 * i915_gpu_lower - lower GPU frequency limit
6464 *
6465 * IPS indicates we're close to a thermal limit, so throttle back the GPU
6466 * frequency maximum.
6467 */
6468 bool i915_gpu_lower(void)
6469 {
6470 struct drm_i915_private *dev_priv;
6471 bool ret = true;
6473 spin_lock_irq(&mchdev_lock);
6474 if (!i915_mch_dev) {
6475 ret = false;
6476 goto out_unlock;
6477 }
6478 dev_priv = i915_mch_dev;
6480 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
6481 dev_priv->ips.max_delay++;
6483 out_unlock:
6484 spin_unlock_irq(&mchdev_lock);
6486 return ret;
6487 }
6488 EXPORT_SYMBOL_GPL(i915_gpu_lower);
6490 /**
6491 * i915_gpu_busy - indicate GPU busyness to IPS
6492 *
6493 * Tell the IPS driver whether or not the GPU is busy.
6494 */
6495 bool i915_gpu_busy(void)
6496 {
6497 bool ret = false;
6499 spin_lock_irq(&mchdev_lock);
6500 if (i915_mch_dev)
6501 ret = i915_mch_dev->gt.awake;
6502 spin_unlock_irq(&mchdev_lock);
6504 return ret;
6505 }
6506 EXPORT_SYMBOL_GPL(i915_gpu_busy);
6508 /**
6509 * i915_gpu_turbo_disable - disable graphics turbo
6510 *
6511 * Disable graphics turbo by resetting the max frequency and setting the
6512 * current frequency to the default.
6513 */
6514 bool i915_gpu_turbo_disable(void)
6515 {
6516 struct drm_i915_private *dev_priv;
6517 bool ret = true;
6519 spin_lock_irq(&mchdev_lock);
6520 if (!i915_mch_dev) {
6521 ret = false;
6522 goto out_unlock;
6523 }
6524 dev_priv = i915_mch_dev;
6526 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6528 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6529 ret = false;
6531 out_unlock:
6532 spin_unlock_irq(&mchdev_lock);
6534 return ret;
6535 }
6536 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6538 /*
6539 * Tells the intel_ips driver that the i915 driver is now loaded, if
6540 * IPS got loaded first.
6542 * This awkward dance is so that neither module has to depend on the
6543 * other in order for IPS to do the appropriate communication of
6544 * GPU turbo limits to i915.
6545 */
6546 static void
6547 ips_ping_for_i915_load(void)
6548 {
6549 void (*link)(void);
6551 link = symbol_get(ips_link_to_i915_driver);
6552 if (link) {
6553 link();
6554 symbol_put(ips_link_to_i915_driver);
6555 }
6556 }
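/*
 * Note: symbol_get() resolves the symbol at runtime and takes a reference
 * on the module providing it (intel_ips here), returning NULL if that
 * module isn't loaded; symbol_put() drops the reference so intel_ips can
 * be unloaded again. This is what keeps the two modules free of hard
 * link-time dependencies on each other.
 */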
6558 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6560 /* We only register the i915 ips part with intel-ips once everything is
6561 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6562 spin_lock_irq(&mchdev_lock);
6563 i915_mch_dev = dev_priv;
6564 spin_unlock_irq(&mchdev_lock);
6566 ips_ping_for_i915_load();
6569 void intel_gpu_ips_teardown(void)
6571 spin_lock_irq(&mchdev_lock);
6572 i915_mch_dev = NULL;
6573 spin_unlock_irq(&mchdev_lock);
6576 static void intel_init_emon(struct drm_i915_private *dev_priv)
6577 {
6578 u32 lcfuse;
6579 u8 pxw[16];
6580 int i;
6582 /* Disable to program */
6583 I915_WRITE(ECR, 0);
6584 POSTING_READ(ECR);
6586 /* Program energy weights for various events */
6587 I915_WRITE(SDEW, 0x15040d00);
6588 I915_WRITE(CSIEW0, 0x007f0000);
6589 I915_WRITE(CSIEW1, 0x1e220004);
6590 I915_WRITE(CSIEW2, 0x04000004);
6592 for (i = 0; i < 5; i++)
6593 I915_WRITE(PEW(i), 0);
6594 for (i = 0; i < 3; i++)
6595 I915_WRITE(DEW(i), 0);
6597 /* Program P-state weights to account for frequency power adjustment */
6598 for (i = 0; i < 16; i++) {
6599 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6600 unsigned long freq = intel_pxfreq(pxvidfreq);
6601 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6602 PXVFREQ_PX_SHIFT;
6603 unsigned long val;
6605 val = vid * vid;
6606 val *= (freq / 1000);
6607 val *= 255;
6608 val /= (127*127*900);
6609 if (val > 0xff)
6610 DRM_ERROR("bad pxval: %ld\n", val);
6611 pxw[i] = val;
6612 }
6613 /* Render standby states get 0 weight */
6614 pxw[14] = 0;
6615 pxw[15] = 0;
6617 for (i = 0; i < 4; i++) {
6618 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6619 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6620 I915_WRITE(PXW(i), val);
6623 /* Adjust magic regs to magic values (more experimental results) */
6624 I915_WRITE(OGW0, 0);
6625 I915_WRITE(OGW1, 0);
6626 I915_WRITE(EG0, 0x00007f00);
6627 I915_WRITE(EG1, 0x0000000e);
6628 I915_WRITE(EG2, 0x000e0000);
6629 I915_WRITE(EG3, 0x68000300);
6630 I915_WRITE(EG4, 0x42000000);
6631 I915_WRITE(EG5, 0x00140031);
6632 I915_WRITE(EG6, 0);
6633 I915_WRITE(EG7, 0);
6635 for (i = 0; i < 8; i++)
6636 I915_WRITE(PXWL(i), 0);
6638 /* Enable PMON + select events */
6639 I915_WRITE(ECR, 0x80000019);
6641 lcfuse = I915_READ(LCFUSE02);
6643 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6644 }
6646 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6647 {
6648 /*
6649 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
6650 * requirement.
6651 */
6652 if (!i915.enable_rc6) {
6653 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6654 intel_runtime_pm_get(dev_priv);
6655 }
6657 mutex_lock(&dev_priv->drm.struct_mutex);
6658 mutex_lock(&dev_priv->rps.hw_lock);
6660 /* Initialize RPS limits (for userspace) */
6661 if (IS_CHERRYVIEW(dev_priv))
6662 cherryview_init_gt_powersave(dev_priv);
6663 else if (IS_VALLEYVIEW(dev_priv))
6664 valleyview_init_gt_powersave(dev_priv);
6665 else if (INTEL_GEN(dev_priv) >= 6)
6666 gen6_init_rps_frequencies(dev_priv);
6668 /* Derive initial user preferences/limits from the hardware limits */
6669 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
6670 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
6672 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
6673 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
6675 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6676 dev_priv->rps.min_freq_softlimit =
6677 max_t(int,
6678 dev_priv->rps.efficient_freq,
6679 intel_freq_opcode(dev_priv, 450));
6681 /* After setting max-softlimit, find the overclock max freq */
6682 if (IS_GEN6(dev_priv) ||
6683 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
6684 u32 params = 0;
6686 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
6687 if (params & BIT(31)) { /* OC supported */
6688 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
6689 (dev_priv->rps.max_freq & 0xff) * 50,
6690 (params & 0xff) * 50);
6691 dev_priv->rps.max_freq = params & 0xff;
6692 }
6693 }
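/*
 * Worked example (illustrative): if pcode returned params = 0x80000020,
 * bit 31 signals overclocking support and the low byte (0x20 = 32) is the
 * overclock limit in 50 MHz units, i.e. 32 * 50 = 1600 MHz.
 */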
6695 /* Finally allow us to boost to max by default */
6696 dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
6698 mutex_unlock(&dev_priv->rps.hw_lock);
6699 mutex_unlock(&dev_priv->drm.struct_mutex);
6701 intel_autoenable_gt_powersave(dev_priv);
6704 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6706 if (IS_VALLEYVIEW(dev_priv))
6707 valleyview_cleanup_gt_powersave(dev_priv);
6709 if (!i915.enable_rc6)
6710 intel_runtime_pm_put(dev_priv);
6713 /**
6714 * intel_suspend_gt_powersave - suspend PM work and helper threads
6715 * @dev_priv: i915 device
6717 * We don't want to disable RC6 or other features here, we just want
6718 * to make sure any work we've queued has finished and won't bother
6719 * us while we're suspended.
6720 */
6721 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6722 {
6723 if (INTEL_GEN(dev_priv) < 6)
6724 return;
6726 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
6727 intel_runtime_pm_put(dev_priv);
6729 /* gen6_rps_idle() will be called later to disable interrupts */
6732 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
6734 dev_priv->rps.enabled = true; /* force disabling */
6735 intel_disable_gt_powersave(dev_priv);
6737 gen6_reset_rps_interrupts(dev_priv);
6740 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6742 if (!READ_ONCE(dev_priv->rps.enabled))
6743 return;
6745 mutex_lock(&dev_priv->rps.hw_lock);
6747 if (INTEL_GEN(dev_priv) >= 9) {
6748 gen9_disable_rc6(dev_priv);
6749 gen9_disable_rps(dev_priv);
6750 } else if (IS_CHERRYVIEW(dev_priv)) {
6751 cherryview_disable_rps(dev_priv);
6752 } else if (IS_VALLEYVIEW(dev_priv)) {
6753 valleyview_disable_rps(dev_priv);
6754 } else if (INTEL_GEN(dev_priv) >= 6) {
6755 gen6_disable_rps(dev_priv);
6756 } else if (IS_IRONLAKE_M(dev_priv)) {
6757 ironlake_disable_drps(dev_priv);
6758 }
6760 dev_priv->rps.enabled = false;
6761 mutex_unlock(&dev_priv->rps.hw_lock);
6764 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6766 /* We shouldn't be disabling as we submit, so this should be less
6767 * racy than it appears!
6768 */
6769 if (READ_ONCE(dev_priv->rps.enabled))
6770 return;
6772 /* Powersaving is controlled by the host when inside a VM */
6773 if (intel_vgpu_active(dev_priv))
6774 return;
6776 mutex_lock(&dev_priv->rps.hw_lock);
6778 if (IS_CHERRYVIEW(dev_priv)) {
6779 cherryview_enable_rps(dev_priv);
6780 } else if (IS_VALLEYVIEW(dev_priv)) {
6781 valleyview_enable_rps(dev_priv);
6782 } else if (INTEL_GEN(dev_priv) >= 9) {
6783 gen9_enable_rc6(dev_priv);
6784 gen9_enable_rps(dev_priv);
6785 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6786 gen6_update_ring_freq(dev_priv);
6787 } else if (IS_BROADWELL(dev_priv)) {
6788 gen8_enable_rps(dev_priv);
6789 gen6_update_ring_freq(dev_priv);
6790 } else if (INTEL_GEN(dev_priv) >= 6) {
6791 gen6_enable_rps(dev_priv);
6792 gen6_update_ring_freq(dev_priv);
6793 } else if (IS_IRONLAKE_M(dev_priv)) {
6794 ironlake_enable_drps(dev_priv);
6795 intel_init_emon(dev_priv);
6796 }
6798 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6799 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6801 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6802 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6804 dev_priv->rps.enabled = true;
6805 mutex_unlock(&dev_priv->rps.hw_lock);
6808 static void __intel_autoenable_gt_powersave(struct work_struct *work)
6810 struct drm_i915_private *dev_priv =
6811 container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
6812 struct intel_engine_cs *rcs;
6813 struct drm_i915_gem_request *req;
6815 if (READ_ONCE(dev_priv->rps.enabled))
6816 goto out;
6818 rcs = dev_priv->engine[RCS];
6819 if (rcs->last_context)
6820 goto out;
6822 if (!rcs->init_context)
6823 goto out;
6825 mutex_lock(&dev_priv->drm.struct_mutex);
6827 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
6828 if (IS_ERR(req))
6829 goto unlock;
6831 if (!i915.enable_execlists && i915_switch_context(req) == 0)
6832 rcs->init_context(req);
6834 /* Mark the device busy, calling intel_enable_gt_powersave() */
6835 i915_add_request_no_flush(req);
6837 unlock:
6838 mutex_unlock(&dev_priv->drm.struct_mutex);
6839 out:
6840 intel_runtime_pm_put(dev_priv);
6841 }
6843 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
6845 if (READ_ONCE(dev_priv->rps.enabled))
6846 return;
6848 if (IS_IRONLAKE_M(dev_priv)) {
6849 ironlake_enable_drps(dev_priv);
6850 intel_init_emon(dev_priv);
6851 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6852 /*
6853 * PCU communication is slow and this doesn't need to be
6854 * done at any specific time, so do this out of our fast path
6855 * to make resume and init faster.
6857 * We depend on the HW RC6 power context save/restore
6858 * mechanism when entering D3 through runtime PM suspend. So
6859 * disable RPM until RPS/RC6 is properly setup. We can only
6860 * get here via the driver load/system resume/runtime resume
6861 * paths, so the _noresume version is enough (and in case of
6862 * runtime resume it's necessary).
6863 */
6864 if (queue_delayed_work(dev_priv->wq,
6865 &dev_priv->rps.autoenable_work,
6866 round_jiffies_up_relative(HZ)))
6867 intel_runtime_pm_get_noresume(dev_priv);
6868 }
6869 }
6871 static void ibx_init_clock_gating(struct drm_device *dev)
6873 struct drm_i915_private *dev_priv = to_i915(dev);
6875 /*
6876 * On Ibex Peak and Cougar Point, we need to disable clock
6877 * gating for the panel power sequencer or it will fail to
6878 * start up when no ports are active.
6879 */
6880 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6883 static void g4x_disable_trickle_feed(struct drm_device *dev)
6885 struct drm_i915_private *dev_priv = to_i915(dev);
6888 for_each_pipe(dev_priv, pipe) {
6889 I915_WRITE(DSPCNTR(pipe),
6890 I915_READ(DSPCNTR(pipe)) |
6891 DISPPLANE_TRICKLE_FEED_DISABLE);
6893 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6894 POSTING_READ(DSPSURF(pipe));
6898 static void ilk_init_lp_watermarks(struct drm_device *dev)
6900 struct drm_i915_private *dev_priv = to_i915(dev);
6902 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6903 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6904 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6906 /*
6907 * Don't touch WM1S_LP_EN here.
6908 * Doing so could cause underruns.
6909 */
6912 static void ironlake_init_clock_gating(struct drm_device *dev)
6914 struct drm_i915_private *dev_priv = to_i915(dev);
6915 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6917 /*
6918 * Required for FBC
6919 * WaFbcDisableDpfcClockGating:ilk
6920 */
6921 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6922 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6923 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6925 I915_WRITE(PCH_3DCGDIS0,
6926 MARIUNIT_CLOCK_GATE_DISABLE |
6927 SVSMUNIT_CLOCK_GATE_DISABLE);
6928 I915_WRITE(PCH_3DCGDIS1,
6929 VFMUNIT_CLOCK_GATE_DISABLE);
6931 /*
6932 * According to the spec the following bits should be set in
6933 * order to enable memory self-refresh
6934 * The bit 22/21 of 0x42004
6935 * The bit 5 of 0x42020
6936 * The bit 15 of 0x45000.
6937 */
6938 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6939 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6940 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
6941 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6942 I915_WRITE(DISP_ARB_CTL,
6943 (I915_READ(DISP_ARB_CTL) |
6944 DISP_FBC_WM_DIS));
6946 ilk_init_lp_watermarks(dev);
6948 /*
6949 * Based on the document from hardware guys the following bits
6950 * should be set unconditionally in order to enable FBC.
6951 * The bit 22 of 0x42000
6952 * The bit 22 of 0x42004
6953 * The bit 7,8,9 of 0x42020.
6954 */
6955 if (IS_IRONLAKE_M(dev_priv)) {
6956 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
6957 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6958 I915_READ(ILK_DISPLAY_CHICKEN1) |
6959 ILK_FBCQ_DIS);
6960 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6961 I915_READ(ILK_DISPLAY_CHICKEN2) |
6962 ILK_DPARB_GATE);
6963 }
6965 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6967 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6968 I915_READ(ILK_DISPLAY_CHICKEN2) |
6969 ILK_ELPIN_409_SELECT);
6970 I915_WRITE(_3D_CHICKEN2,
6971 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6972 _3D_CHICKEN2_WM_READ_PIPELINED);
6974 /* WaDisableRenderCachePipelinedFlush:ilk */
6975 I915_WRITE(CACHE_MODE_0,
6976 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6978 /* WaDisable_RenderCache_OperationalFlush:ilk */
6979 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6981 g4x_disable_trickle_feed(dev);
6983 ibx_init_clock_gating(dev);
6986 static void cpt_init_clock_gating(struct drm_device *dev)
6988 struct drm_i915_private *dev_priv = to_i915(dev);
6989 int pipe;
6990 uint32_t val;
6992 /*
6993 * On Ibex Peak and Cougar Point, we need to disable clock
6994 * gating for the panel power sequencer or it will fail to
6995 * start up when no ports are active.
6996 */
6997 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6998 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6999 PCH_CPUNIT_CLOCK_GATE_DISABLE);
7000 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
7001 DPLS_EDP_PPS_FIX_DIS);
7002 /* The below fixes the weird display corruption, a few pixels shifted
7003 * downward, on (only) LVDS of some HP laptops with IVY.
7004 */
7005 for_each_pipe(dev_priv, pipe) {
7006 val = I915_READ(TRANS_CHICKEN2(pipe));
7007 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
7008 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7009 if (dev_priv->vbt.fdi_rx_polarity_inverted)
7010 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7011 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
7012 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
7013 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
7014 I915_WRITE(TRANS_CHICKEN2(pipe), val);
7016 /* WADP0ClockGatingDisable */
7017 for_each_pipe(dev_priv, pipe) {
7018 I915_WRITE(TRANS_CHICKEN1(pipe),
7019 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7020 }
7021 }
7023 static void gen6_check_mch_setup(struct drm_device *dev)
7025 struct drm_i915_private *dev_priv = to_i915(dev);
7026 uint32_t tmp;
7028 tmp = I915_READ(MCH_SSKPD);
7029 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
7030 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
7034 static void gen6_init_clock_gating(struct drm_device *dev)
7036 struct drm_i915_private *dev_priv = to_i915(dev);
7037 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7039 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7041 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7042 I915_READ(ILK_DISPLAY_CHICKEN2) |
7043 ILK_ELPIN_409_SELECT);
7045 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
7046 I915_WRITE(_3D_CHICKEN,
7047 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
7049 /* WaDisable_RenderCache_OperationalFlush:snb */
7050 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7052 /*
7053 * BSpec recommends 8x4 when MSAA is used,
7054 * however in practice 16x4 seems fastest.
7056 * Note that PS/WM thread counts depend on the WIZ hashing
7057 * disable bit, which we don't touch here, but it's good
7058 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7059 */
7060 I915_WRITE(GEN6_GT_MODE,
7061 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7063 ilk_init_lp_watermarks(dev);
7065 I915_WRITE(CACHE_MODE_0,
7066 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
7068 I915_WRITE(GEN6_UCGCTL1,
7069 I915_READ(GEN6_UCGCTL1) |
7070 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7071 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7073 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
7074 * gating disable must be set. Failure to set it results in
7075 * flickering pixels due to Z write ordering failures after
7076 * some amount of runtime in the Mesa "fire" demo, and Unigine
7077 * Sanctuary and Tropics, and apparently anything else with
7078 * alpha test or pixel discard.
7080 * According to the spec, bit 11 (RCCUNIT) must also be set,
7081 * but we didn't debug actual testcases to find it out.
7083 * WaDisableRCCUnitClockGating:snb
7084 * WaDisableRCPBUnitClockGating:snb
7085 */
7086 I915_WRITE(GEN6_UCGCTL2,
7087 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
7088 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
7090 /* WaStripsFansDisableFastClipPerformanceFix:snb */
7091 I915_WRITE(_3D_CHICKEN3,
7092 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
7096 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
7097 * 3DSTATE_SF number of SF output attributes is more than 16."
7099 I915_WRITE(_3D_CHICKEN3,
7100 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
7102 /*
7103 * According to the spec the following bits should be
7104 * set in order to enable memory self-refresh and fbc:
7105 * The bit21 and bit22 of 0x42000
7106 * The bit21 and bit22 of 0x42004
7107 * The bit5 and bit7 of 0x42020
7108 * The bit14 of 0x70180
7109 * The bit14 of 0x71180
7111 * WaFbcAsynchFlipDisableFbcQueue:snb
7112 */
7113 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7114 I915_READ(ILK_DISPLAY_CHICKEN1) |
7115 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7116 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7117 I915_READ(ILK_DISPLAY_CHICKEN2) |
7118 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7119 I915_WRITE(ILK_DSPCLK_GATE_D,
7120 I915_READ(ILK_DSPCLK_GATE_D) |
7121 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
7122 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
7124 g4x_disable_trickle_feed(dev);
7126 cpt_init_clock_gating(dev);
7128 gen6_check_mch_setup(dev);
7131 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
7133 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
7135 /*
7136 * WaVSThreadDispatchOverride:ivb,vlv
7138 * This actually overrides the dispatch
7139 * mode for all thread types.
7140 */
7141 reg &= ~GEN7_FF_SCHED_MASK;
7142 reg |= GEN7_FF_TS_SCHED_HW;
7143 reg |= GEN7_FF_VS_SCHED_HW;
7144 reg |= GEN7_FF_DS_SCHED_HW;
7146 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
7149 static void lpt_init_clock_gating(struct drm_device *dev)
7151 struct drm_i915_private *dev_priv = to_i915(dev);
7153 /*
7154 * TODO: this bit should only be enabled when really needed, then
7155 * disabled when not needed anymore in order to save power.
7156 */
7157 if (HAS_PCH_LPT_LP(dev_priv))
7158 I915_WRITE(SOUTH_DSPCLK_GATE_D,
7159 I915_READ(SOUTH_DSPCLK_GATE_D) |
7160 PCH_LP_PARTITION_LEVEL_DISABLE);
7162 /* WADPOClockGatingDisable:hsw */
7163 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
7164 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
7165 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7168 static void lpt_suspend_hw(struct drm_device *dev)
7170 struct drm_i915_private *dev_priv = to_i915(dev);
7172 if (HAS_PCH_LPT_LP(dev_priv)) {
7173 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
7175 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7176 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7180 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7181 int general_prio_credits,
7182 int high_prio_credits)
7186 /* WaTempDisableDOPClkGating:bdw */
7187 misccpctl = I915_READ(GEN7_MISCCPCTL);
7188 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7190 I915_WRITE(GEN8_L3SQCREG1,
7191 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
7192 L3_HIGH_PRIO_CREDITS(high_prio_credits));
7194 /*
7195 * Wait at least 100 clocks before re-enabling clock gating.
7196 * See the definition of L3SQCREG1 in BSpec.
7197 */
7198 POSTING_READ(GEN8_L3SQCREG1);
7200 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
7203 static void kabylake_init_clock_gating(struct drm_device *dev)
7205 struct drm_i915_private *dev_priv = dev->dev_private;
7207 gen9_init_clock_gating(dev);
7209 /* WaDisableSDEUnitClockGating:kbl */
7210 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7211 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7212 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7214 /* WaDisableGamClockGating:kbl */
7215 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7216 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7217 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7219 /* WaFbcNukeOnHostModify:kbl */
7220 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7221 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7224 static void skylake_init_clock_gating(struct drm_device *dev)
7226 struct drm_i915_private *dev_priv = dev->dev_private;
7228 gen9_init_clock_gating(dev);
7230 /* WAC6entrylatency:skl */
7231 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7232 FBC_LLC_FULLY_OPEN);
7234 /* WaFbcNukeOnHostModify:skl */
7235 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7236 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7239 static void broadwell_init_clock_gating(struct drm_device *dev)
7241 struct drm_i915_private *dev_priv = to_i915(dev);
7244 ilk_init_lp_watermarks(dev);
7246 /* WaSwitchSolVfFArbitrationPriority:bdw */
7247 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7249 /* WaPsrDPAMaskVBlankInSRD:bdw */
7250 I915_WRITE(CHICKEN_PAR1_1,
7251 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
7253 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
7254 for_each_pipe(dev_priv, pipe) {
7255 I915_WRITE(CHICKEN_PIPESL_1(pipe),
7256 I915_READ(CHICKEN_PIPESL_1(pipe)) |
7257 BDW_DPRS_MASK_VBLANK_SRD);
7260 /* WaVSRefCountFullforceMissDisable:bdw */
7261 /* WaDSRefCountFullforceMissDisable:bdw */
7262 I915_WRITE(GEN7_FF_THREAD_MODE,
7263 I915_READ(GEN7_FF_THREAD_MODE) &
7264 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7266 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7267 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7269 /* WaDisableSDEUnitClockGating:bdw */
7270 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7271 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7273 /* WaProgramL3SqcReg1Default:bdw */
7274 gen8_set_l3sqc_credits(dev_priv, 30, 2);
7276 /*
7277 * WaGttCachingOffByDefault:bdw
7278 * GTT cache may not work with big pages, so if those
7279 * are ever enabled GTT cache may need to be disabled.
7280 */
7281 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7283 /* WaKVMNotificationOnConfigChange:bdw */
7284 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7285 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7287 lpt_init_clock_gating(dev);
7290 static void haswell_init_clock_gating(struct drm_device *dev)
7292 struct drm_i915_private *dev_priv = to_i915(dev);
7294 ilk_init_lp_watermarks(dev);
7296 /* L3 caching of data atomics doesn't work -- disable it. */
7297 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
7298 I915_WRITE(HSW_ROW_CHICKEN3,
7299 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
7301 /* This is required by WaCatErrorRejectionIssue:hsw */
7302 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7303 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7304 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7306 /* WaVSRefCountFullforceMissDisable:hsw */
7307 I915_WRITE(GEN7_FF_THREAD_MODE,
7308 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
7310 /* WaDisable_RenderCache_OperationalFlush:hsw */
7311 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7313 /* enable HiZ Raw Stall Optimization */
7314 I915_WRITE(CACHE_MODE_0_GEN7,
7315 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7317 /* WaDisable4x2SubspanOptimization:hsw */
7318 I915_WRITE(CACHE_MODE_1,
7319 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7321 /*
7322 * BSpec recommends 8x4 when MSAA is used,
7323 * however in practice 16x4 seems fastest.
7325 * Note that PS/WM thread counts depend on the WIZ hashing
7326 * disable bit, which we don't touch here, but it's good
7327 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7328 */
7329 I915_WRITE(GEN7_GT_MODE,
7330 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7332 /* WaSampleCChickenBitEnable:hsw */
7333 I915_WRITE(HALF_SLICE_CHICKEN3,
7334 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
7336 /* WaSwitchSolVfFArbitrationPriority:hsw */
7337 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7339 /* WaRsPkgCStateDisplayPMReq:hsw */
7340 I915_WRITE(CHICKEN_PAR1_1,
7341 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
7343 lpt_init_clock_gating(dev);
7346 static void ivybridge_init_clock_gating(struct drm_device *dev)
7348 struct drm_i915_private *dev_priv = to_i915(dev);
7351 ilk_init_lp_watermarks(dev);
7353 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7355 /* WaDisableEarlyCull:ivb */
7356 I915_WRITE(_3D_CHICKEN3,
7357 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7359 /* WaDisableBackToBackFlipFix:ivb */
7360 I915_WRITE(IVB_CHICKEN3,
7361 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7362 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7364 /* WaDisablePSDDualDispatchEnable:ivb */
7365 if (IS_IVB_GT1(dev_priv))
7366 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7367 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7369 /* WaDisable_RenderCache_OperationalFlush:ivb */
7370 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7372 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
7373 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
7374 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
7376 /* WaApplyL3ControlAndL3ChickenMode:ivb */
7377 I915_WRITE(GEN7_L3CNTLREG1,
7378 GEN7_WA_FOR_GEN7_L3_CONTROL);
7379 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
7380 GEN7_WA_L3_CHICKEN_MODE);
7381 if (IS_IVB_GT1(dev_priv))
7382 I915_WRITE(GEN7_ROW_CHICKEN2,
7383 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7385 /* must write both registers */
7386 I915_WRITE(GEN7_ROW_CHICKEN2,
7387 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7388 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
7389 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7392 /* WaForceL3Serialization:ivb */
7393 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7394 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7396 /*
7397 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7398 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
7399 */
7400 I915_WRITE(GEN6_UCGCTL2,
7401 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7403 /* This is required by WaCatErrorRejectionIssue:ivb */
7404 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7405 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7406 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7408 g4x_disable_trickle_feed(dev);
7410 gen7_setup_fixed_func_scheduler(dev_priv);
7412 if (0) { /* causes HiZ corruption on ivb:gt1 */
7413 /* enable HiZ Raw Stall Optimization */
7414 I915_WRITE(CACHE_MODE_0_GEN7,
7415 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7418 /* WaDisable4x2SubspanOptimization:ivb */
7419 I915_WRITE(CACHE_MODE_1,
7420 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7422 /*
7423 * BSpec recommends 8x4 when MSAA is used,
7424 * however in practice 16x4 seems fastest.
7426 * Note that PS/WM thread counts depend on the WIZ hashing
7427 * disable bit, which we don't touch here, but it's good
7428 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7429 */
7430 I915_WRITE(GEN7_GT_MODE,
7431 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7433 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7434 snpcr &= ~GEN6_MBC_SNPCR_MASK;
7435 snpcr |= GEN6_MBC_SNPCR_MED;
7436 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7438 if (!HAS_PCH_NOP(dev_priv))
7439 cpt_init_clock_gating(dev);
7441 gen6_check_mch_setup(dev);
7444 static void valleyview_init_clock_gating(struct drm_device *dev)
7446 struct drm_i915_private *dev_priv = to_i915(dev);
7448 /* WaDisableEarlyCull:vlv */
7449 I915_WRITE(_3D_CHICKEN3,
7450 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7452 /* WaDisableBackToBackFlipFix:vlv */
7453 I915_WRITE(IVB_CHICKEN3,
7454 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7455 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7457 /* WaPsdDispatchEnable:vlv */
7458 /* WaDisablePSDDualDispatchEnable:vlv */
7459 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7460 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
7461 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7463 /* WaDisable_RenderCache_OperationalFlush:vlv */
7464 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7466 /* WaForceL3Serialization:vlv */
7467 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7468 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7470 /* WaDisableDopClockGating:vlv */
7471 I915_WRITE(GEN7_ROW_CHICKEN2,
7472 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7474 /* This is required by WaCatErrorRejectionIssue:vlv */
7475 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7476 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7477 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7479 gen7_setup_fixed_func_scheduler(dev_priv);
7481 /*
7482 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7483 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
7484 */
7485 I915_WRITE(GEN6_UCGCTL2,
7486 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7488 /* WaDisableL3Bank2xClockGate:vlv
7489 * Disabling L3 clock gating- MMIO 940c[25] = 1
7490 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
7491 I915_WRITE(GEN7_UCGCTL4,
7492 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
7494 /*
7495 * BSpec says this must be set, even though
7496 * WaDisable4x2SubspanOptimization isn't listed for VLV.
7497 */
7498 I915_WRITE(CACHE_MODE_1,
7499 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7501 /*
7502 * BSpec recommends 8x4 when MSAA is used,
7503 * however in practice 16x4 seems fastest.
7505 * Note that PS/WM thread counts depend on the WIZ hashing
7506 * disable bit, which we don't touch here, but it's good
7507 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7508 */
7509 I915_WRITE(GEN7_GT_MODE,
7510 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7512 /*
7513 * WaIncreaseL3CreditsForVLVB0:vlv
7514 * This is the hardware default actually.
7515 */
7516 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
7518 /*
7519 * WaDisableVLVClockGating_VBIIssue:vlv
7520 * Disable clock gating on the GCFG unit to prevent a delay
7521 * in the reporting of vblank events.
7522 */
7523 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7526 static void cherryview_init_clock_gating(struct drm_device *dev)
7528 struct drm_i915_private *dev_priv = to_i915(dev);
7530 /* WaVSRefCountFullforceMissDisable:chv */
7531 /* WaDSRefCountFullforceMissDisable:chv */
7532 I915_WRITE(GEN7_FF_THREAD_MODE,
7533 I915_READ(GEN7_FF_THREAD_MODE) &
7534 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7536 /* WaDisableSemaphoreAndSyncFlipWait:chv */
7537 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7538 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7540 /* WaDisableCSUnitClockGating:chv */
7541 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7542 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7544 /* WaDisableSDEUnitClockGating:chv */
7545 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7546 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7548 /*
7549 * WaProgramL3SqcReg1Default:chv
7550 * See gfxspecs/Related Documents/Performance Guide/
7551 * LSQC Setting Recommendations.
7552 */
7553 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7555 /*
7556 * GTT cache may not work with big pages, so if those
7557 * are ever enabled GTT cache may need to be disabled.
7558 */
7559 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7562 static void g4x_init_clock_gating(struct drm_device *dev)
7564 struct drm_i915_private *dev_priv = to_i915(dev);
7565 uint32_t dspclk_gate;
7567 I915_WRITE(RENCLK_GATE_D1, 0);
7568 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7569 GS_UNIT_CLOCK_GATE_DISABLE |
7570 CL_UNIT_CLOCK_GATE_DISABLE);
7571 I915_WRITE(RAMCLK_GATE_D, 0);
7572 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7573 OVRUNIT_CLOCK_GATE_DISABLE |
7574 OVCUNIT_CLOCK_GATE_DISABLE;
7575 if (IS_GM45(dev_priv))
7576 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7577 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7579 /* WaDisableRenderCachePipelinedFlush */
7580 I915_WRITE(CACHE_MODE_0,
7581 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7583 /* WaDisable_RenderCache_OperationalFlush:g4x */
7584 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7586 g4x_disable_trickle_feed(dev);
7589 static void crestline_init_clock_gating(struct drm_device *dev)
7591 struct drm_i915_private *dev_priv = to_i915(dev);
7593 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7594 I915_WRITE(RENCLK_GATE_D2, 0);
7595 I915_WRITE(DSPCLK_GATE_D, 0);
7596 I915_WRITE(RAMCLK_GATE_D, 0);
7597 I915_WRITE16(DEUC, 0);
7598 I915_WRITE(MI_ARB_STATE,
7599 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7601 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7602 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7605 static void broadwater_init_clock_gating(struct drm_device *dev)
7607 struct drm_i915_private *dev_priv = to_i915(dev);
7609 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7610 I965_RCC_CLOCK_GATE_DISABLE |
7611 I965_RCPB_CLOCK_GATE_DISABLE |
7612 I965_ISC_CLOCK_GATE_DISABLE |
7613 I965_FBC_CLOCK_GATE_DISABLE);
7614 I915_WRITE(RENCLK_GATE_D2, 0);
7615 I915_WRITE(MI_ARB_STATE,
7616 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7618 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7619 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7622 static void gen3_init_clock_gating(struct drm_device *dev)
7624 struct drm_i915_private *dev_priv = to_i915(dev);
7625 u32 dstate = I915_READ(D_STATE);
7627 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7628 DSTATE_DOT_CLOCK_GATING;
7629 I915_WRITE(D_STATE, dstate);
7631 if (IS_PINEVIEW(dev))
7632 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7634 /* IIR "flip pending" means done if this bit is set */
7635 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7637 /* interrupts should cause a wake up from C3 */
7638 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7640 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7641 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7643 I915_WRITE(MI_ARB_STATE,
7644 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7647 static void i85x_init_clock_gating(struct drm_device *dev)
7649 struct drm_i915_private *dev_priv = to_i915(dev);
7651 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7653 /* interrupts should cause a wake up from C3 */
7654 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7655 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7657 I915_WRITE(MEM_MODE,
7658 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7661 static void i830_init_clock_gating(struct drm_device *dev)
7663 struct drm_i915_private *dev_priv = to_i915(dev);
7665 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7667 I915_WRITE(MEM_MODE,
7668 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7669 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7672 void intel_init_clock_gating(struct drm_device *dev)
7674 struct drm_i915_private *dev_priv = to_i915(dev);
7676 dev_priv->display.init_clock_gating(dev);
7679 void intel_suspend_hw(struct drm_device *dev)
7681 if (HAS_PCH_LPT(to_i915(dev)))
7682 lpt_suspend_hw(dev);
7685 static void nop_init_clock_gating(struct drm_device *dev)
7687 DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
7690 /**
7691 * intel_init_clock_gating_hooks - setup the clock gating hooks
7692 * @dev_priv: device private
7694 * Setup the hooks that configure which clocks of a given platform can be
7695 * gated and also apply various GT and display specific workarounds for these
7696 * platforms. Note that some GT specific workarounds are applied separately
7697 * when GPU contexts or batchbuffers start their execution.
7698 */
7699 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7701 if (IS_SKYLAKE(dev_priv))
7702 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7703 else if (IS_KABYLAKE(dev_priv))
7704 dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
7705 else if (IS_BROXTON(dev_priv))
7706 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7707 else if (IS_BROADWELL(dev_priv))
7708 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7709 else if (IS_CHERRYVIEW(dev_priv))
7710 dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
7711 else if (IS_HASWELL(dev_priv))
7712 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
7713 else if (IS_IVYBRIDGE(dev_priv))
7714 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7715 else if (IS_VALLEYVIEW(dev_priv))
7716 dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
7717 else if (IS_GEN6(dev_priv))
7718 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7719 else if (IS_GEN5(dev_priv))
7720 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7721 else if (IS_G4X(dev_priv))
7722 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7723 else if (IS_CRESTLINE(dev_priv))
7724 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7725 else if (IS_BROADWATER(dev_priv))
7726 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7727 else if (IS_GEN3(dev_priv))
7728 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7729 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7730 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7731 else if (IS_GEN2(dev_priv))
7732 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7733 else {
7734 MISSING_CASE(INTEL_DEVID(dev_priv));
7735 dev_priv->display.init_clock_gating = nop_init_clock_gating;
7736 }
7737 }
7739 /* Set up chip specific power management-related functions */
7740 void intel_init_pm(struct drm_device *dev)
7742 struct drm_i915_private *dev_priv = to_i915(dev);
7744 intel_fbc_init(dev_priv);
7747 if (IS_PINEVIEW(dev))
7748 i915_pineview_get_mem_freq(dev);
7749 else if (IS_GEN5(dev_priv))
7750 i915_ironlake_get_mem_freq(dev);
7752 /* For FIFO watermark updates */
7753 if (INTEL_INFO(dev)->gen >= 9) {
7754 skl_setup_wm_latency(dev);
7755 dev_priv->display.update_wm = skl_update_wm;
7756 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7757 } else if (HAS_PCH_SPLIT(dev_priv)) {
7758 ilk_setup_wm_latency(dev);
7760 if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
7761 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7762 (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
7763 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7764 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7765 dev_priv->display.compute_intermediate_wm =
7766 ilk_compute_intermediate_wm;
7767 dev_priv->display.initial_watermarks =
7768 ilk_initial_watermarks;
7769 dev_priv->display.optimize_watermarks =
7770 ilk_optimize_watermarks;
7772 DRM_DEBUG_KMS("Failed to read display plane latency. "
7775 } else if (IS_CHERRYVIEW(dev_priv)) {
7776 vlv_setup_wm_latency(dev);
7777 dev_priv->display.update_wm = vlv_update_wm;
7778 } else if (IS_VALLEYVIEW(dev_priv)) {
7779 vlv_setup_wm_latency(dev);
7780 dev_priv->display.update_wm = vlv_update_wm;
7781 } else if (IS_PINEVIEW(dev)) {
7782 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
7783 dev_priv->is_ddr3,
7784 dev_priv->fsb_freq,
7785 dev_priv->mem_freq)) {
7786 DRM_INFO("failed to find known CxSR latency "
7787 "(found ddr%s fsb freq %d, mem freq %d), "
7789 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7790 dev_priv->fsb_freq, dev_priv->mem_freq);
7791 /* Disable CxSR and never update its watermark again */
7792 intel_set_memory_cxsr(dev_priv, false);
7793 dev_priv->display.update_wm = NULL;
7794 } else
7795 dev_priv->display.update_wm = pineview_update_wm;
7796 } else if (IS_G4X(dev_priv)) {
7797 dev_priv->display.update_wm = g4x_update_wm;
7798 } else if (IS_GEN4(dev_priv)) {
7799 dev_priv->display.update_wm = i965_update_wm;
7800 } else if (IS_GEN3(dev_priv)) {
7801 dev_priv->display.update_wm = i9xx_update_wm;
7802 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7803 } else if (IS_GEN2(dev_priv)) {
7804 if (INTEL_INFO(dev)->num_pipes == 1) {
7805 dev_priv->display.update_wm = i845_update_wm;
7806 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7807 } else {
7808 dev_priv->display.update_wm = i9xx_update_wm;
7809 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7810 }
7811 } else {
7812 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
7813 }
7814 }
7816 static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
7817 {
7818 uint32_t flags =
7819 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7821 switch (flags) {
7822 case GEN6_PCODE_SUCCESS:
7823 return 0;
7824 case GEN6_PCODE_UNIMPLEMENTED_CMD:
7825 case GEN6_PCODE_ILLEGAL_CMD:
7826 return -ENXIO;
7827 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7828 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7829 return -EOVERFLOW;
7830 case GEN6_PCODE_TIMEOUT:
7831 return -ETIMEDOUT;
7832 default:
7833 MISSING_CASE(flags);
7834 return 0;
7835 }
7836 }
7838 static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
7839 {
7840 uint32_t flags =
7841 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7843 switch (flags) {
7844 case GEN6_PCODE_SUCCESS:
7845 return 0;
7846 case GEN6_PCODE_ILLEGAL_CMD:
7847 return -ENXIO;
7848 case GEN7_PCODE_TIMEOUT:
7849 return -ETIMEDOUT;
7850 case GEN7_PCODE_ILLEGAL_DATA:
7851 return -EINVAL;
7852 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7853 return -EOVERFLOW;
7854 default:
7855 MISSING_CASE(flags);
7856 return 0;
7857 }
7858 }
7860 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7861 {
7862 int status;
7864 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7866 /* GEN6_PCODE_* are outside of the forcewake domain, we can
7867 * use the fw I915_READ variants to reduce the amount of work
7868 * required when reading/writing.
7869 */
7871 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7872 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7876 I915_WRITE_FW(GEN6_PCODE_DATA, *val);
7877 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
7878 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7880 if (intel_wait_for_register_fw(dev_priv,
7881 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7882 500)) {
7883 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7884 return -ETIMEDOUT;
7885 }
7887 *val = I915_READ_FW(GEN6_PCODE_DATA);
7888 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7890 if (INTEL_GEN(dev_priv) > 6)
7891 status = gen7_check_mailbox_status(dev_priv);
7892 else
7893 status = gen6_check_mailbox_status(dev_priv);
7896 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
7904 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7905 u32 mbox, u32 val)
7906 {
7907 int status;
7909 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7911 /* GEN6_PCODE_* are outside of the forcewake domain, we can
7912 * use the fw I915_READ variants to reduce the amount of work
7913 * required when reading/writing.
7914 */
7916 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7917 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7921 I915_WRITE_FW(GEN6_PCODE_DATA, val);
7922 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7924 if (intel_wait_for_register_fw(dev_priv,
7925 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7926 500)) {
7927 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7928 return -ETIMEDOUT;
7929 }
7931 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7933 if (INTEL_GEN(dev_priv) > 6)
7934 status = gen7_check_mailbox_status(dev_priv);
7935 else
7936 status = gen6_check_mailbox_status(dev_priv);
7939 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
7947 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7948 {
7949 /*
7950 * N = val - 0xb7
7951 * Slow = Fast = GPLL ref * N
7952 */
7953 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
7954 }
7956 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7957 {
7958 return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
7959 }
7961 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7962 {
7963 /*
7964 * N = val / 2
7965 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
7966 */
7967 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
7968 }
7970 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7971 {
7972 /* CHV needs even values */
7973 return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
7974 }
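/*
 * Worked example (illustrative, made-up reference clock): with
 * rps.gpll_ref_freq = 19200 and opcode val = 0xc0, byt_gpu_freq() returns
 * DIV_ROUND_CLOSEST(19200 * (0xc0 - 0xb7), 1000) = 173 MHz, and
 * byt_freq_opcode(dev_priv, 173) rounds back to opcode 0xc0.
 */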
7976 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
7977 {
7978 if (IS_GEN9(dev_priv))
7979 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
7980 GEN9_FREQ_SCALER);
7981 else if (IS_CHERRYVIEW(dev_priv))
7982 return chv_gpu_freq(dev_priv, val);
7983 else if (IS_VALLEYVIEW(dev_priv))
7984 return byt_gpu_freq(dev_priv, val);
7985 else
7986 return val * GT_FREQUENCY_MULTIPLIER;
7987 }
7989 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
7990 {
7991 if (IS_GEN9(dev_priv))
7992 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
7993 GT_FREQUENCY_MULTIPLIER);
7994 else if (IS_CHERRYVIEW(dev_priv))
7995 return chv_freq_opcode(dev_priv, val);
7996 else if (IS_VALLEYVIEW(dev_priv))
7997 return byt_freq_opcode(dev_priv, val);
7998 else
7999 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
8000 }
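/*
 * Worked equation (illustrative): on big-core parts before gen9 one RPS
 * opcode step is GT_FREQUENCY_MULTIPLIER (50) MHz, so opcode 9 maps to
 * 450 MHz and intel_freq_opcode(dev_priv, 450) maps back to 9; gen9
 * additionally divides the step by GEN9_FREQ_SCALER (3), giving ~16.7 MHz
 * granularity.
 */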
8002 struct request_boost {
8003 struct work_struct work;
8004 struct drm_i915_gem_request *req;
8005 };
8007 static void __intel_rps_boost_work(struct work_struct *work)
8008 {
8009 struct request_boost *boost = container_of(work, struct request_boost, work);
8010 struct drm_i915_gem_request *req = boost->req;
8012 if (!i915_gem_request_completed(req))
8013 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
8015 i915_gem_request_put(req);
8016 kfree(boost);
8017 }
8019 void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
8020 {
8021 struct request_boost *boost;
8023 if (req == NULL || INTEL_GEN(req->i915) < 6)
8024 return;
8026 if (i915_gem_request_completed(req))
8027 return;
8029 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
8030 if (boost == NULL)
8031 return;
8033 boost->req = i915_gem_request_get(req);
8035 INIT_WORK(&boost->work, __intel_rps_boost_work);
8036 queue_work(req->i915->wq, &boost->work);
8037 }
8039 void intel_pm_setup(struct drm_device *dev)
8040 {
8041 struct drm_i915_private *dev_priv = to_i915(dev);
8043 mutex_init(&dev_priv->rps.hw_lock);
8044 spin_lock_init(&dev_priv->rps.client_lock);
8046 INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
8047 __intel_autoenable_gt_powersave);
8048 INIT_LIST_HEAD(&dev_priv->rps.clients);
8050 dev_priv->pm.suspended = false;
8051 atomic_set(&dev_priv->pm.wakeref_count, 0);
8052 atomic_set(&dev_priv->pm.atomic_seq, 0);
8053 }