/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_pm.h"
#include "intel_sprite.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
    if (HAS_LLC(dev_priv)) {
        /*
         * WaCompressedResourceDisplayNewHashMode:skl,kbl
         * Display WA #0390: skl,kbl
         *
         * Must match Sampler, Pixel Back End, and Media. See
         * WaCompressedResourceSamplerPbeMediaNewHashMode.
         */
        I915_WRITE(CHICKEN_PAR1_1,
                   I915_READ(CHICKEN_PAR1_1) |
                   SKL_DE_COMPRESSED_HASH_MODE);
    }

    /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
    I915_WRITE(CHICKEN_PAR1_1,
               I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

    /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
    I915_WRITE(GEN8_CHICKEN_DCPR_1,
               I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

    /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
    /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
    I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
               DISP_FBC_WM_DIS |
               DISP_FBC_MEMORY_WAKE);

    /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
    I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
               ILK_DPFC_DISABLE_DUMMY0);

    if (IS_SKYLAKE(dev_priv)) {
        /* WaDisableDopClockGating */
        I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
                   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
    }
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
    gen9_init_clock_gating(dev_priv);

    /* WaDisableSDEUnitClockGating:bxt */
    I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
               GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

    /*
     * FIXME:
     * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
     */
    I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
               GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

    /*
     * Wa: Backlight PWM may stop in the asserted state, causing backlight
     * to stay fully on.
     */
    I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
               PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
    gen9_init_clock_gating(dev_priv);

    /*
     * WaDisablePWMClockGating:glk
     * Backlight PWM may stop in the asserted state, causing backlight
     * to stay fully on.
     */
    I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
               PWM1_GATING_DIS | PWM2_GATING_DIS);

    /* WaDDIIOTimeout:glk */
    if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
        u32 val = I915_READ(CHICKEN_MISC_2);
        val &= ~(GLK_CL0_PWR_DOWN |
                 GLK_CL1_PWR_DOWN |
                 GLK_CL2_PWR_DOWN);
        I915_WRITE(CHICKEN_MISC_2, val);
    }
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
    u32 tmp;

    tmp = I915_READ(CLKCFG);

    switch (tmp & CLKCFG_FSB_MASK) {
    case CLKCFG_FSB_533:
        dev_priv->fsb_freq = 533; /* 133*4 */
        break;
    case CLKCFG_FSB_800:
        dev_priv->fsb_freq = 800; /* 200*4 */
        break;
    case CLKCFG_FSB_667:
        dev_priv->fsb_freq = 667; /* 167*4 */
        break;
    case CLKCFG_FSB_400:
        dev_priv->fsb_freq = 400; /* 100*4 */
        break;
    }

    switch (tmp & CLKCFG_MEM_MASK) {
    case CLKCFG_MEM_533:
        dev_priv->mem_freq = 533;
        break;
    case CLKCFG_MEM_667:
        dev_priv->mem_freq = 667;
        break;
    case CLKCFG_MEM_800:
        dev_priv->mem_freq = 800;
        break;
    }

    /* detect pineview DDR3 setting */
    tmp = I915_READ(CSHRDDR3CTL);
    dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
    u16 ddrpll, csipll;

    ddrpll = I915_READ16(DDRMPLL1);
    csipll = I915_READ16(CSIPLL0);

    switch (ddrpll & 0xff) {
    case 0xc:
        dev_priv->mem_freq = 800;
        break;
    case 0x10:
        dev_priv->mem_freq = 1066;
        break;
    case 0x14:
        dev_priv->mem_freq = 1333;
        break;
    case 0x18:
        dev_priv->mem_freq = 1600;
        break;
    default:
        DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                         ddrpll & 0xff);
        dev_priv->mem_freq = 0;
        break;
    }

    dev_priv->ips.r_t = dev_priv->mem_freq;

    switch (csipll & 0x3ff) {
    case 0x00c:
        dev_priv->fsb_freq = 3200;
        break;
    case 0x00e:
        dev_priv->fsb_freq = 3733;
        break;
    case 0x010:
        dev_priv->fsb_freq = 4266;
        break;
    case 0x012:
        dev_priv->fsb_freq = 4800;
        break;
    case 0x014:
        dev_priv->fsb_freq = 5333;
        break;
    case 0x016:
        dev_priv->fsb_freq = 5866;
        break;
    case 0x018:
        dev_priv->fsb_freq = 6400;
        break;
    default:
        DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                         csipll & 0x3ff);
        dev_priv->fsb_freq = 0;
        break;
    }

    if (dev_priv->fsb_freq == 3200) {
        dev_priv->ips.c_m = 0;
    } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
        dev_priv->ips.c_m = 1;
    } else {
        dev_priv->ips.c_m = 2;
    }
}
static const struct cxsr_latency cxsr_latency_table[] = {
    {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

    {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

    {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

    {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

    {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

    {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
                                                         bool is_ddr3,
                                                         int fsb,
                                                         int mem)
{
    const struct cxsr_latency *latency;
    int i;

    if (fsb == 0 || mem == 0)
        return NULL;

    for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
        latency = &cxsr_latency_table[i];
        if (is_desktop == latency->is_desktop &&
            is_ddr3 == latency->is_ddr3 &&
            fsb == latency->fsb_freq && mem == latency->mem_freq)
            return latency;
    }

    DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

    return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
    u32 val;

    vlv_punit_get(dev_priv);

    val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
    if (enable)
        val &= ~FORCE_DDR_HIGH_FREQ;
    else
        val |= FORCE_DDR_HIGH_FREQ;
    val &= ~FORCE_DDR_LOW_FREQ;
    val |= FORCE_DDR_FREQ_REQ_ACK;
    vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

    if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
                  FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
        DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

    vlv_punit_put(dev_priv);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
    u32 val;

    vlv_punit_get(dev_priv);

    val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
    if (enable)
        val |= DSP_MAXFIFO_PM5_ENABLE;
    else
        val &= ~DSP_MAXFIFO_PM5_ENABLE;
    vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

    vlv_punit_put(dev_priv);
}
#define FW_WM(value, plane) \
    (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
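/*
 * Illustrative expansion (editor's note, not from Bspec): FW_WM(5, SR)
 * becomes ((5) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK, i.e. the raw watermark
 * value is shifted into the SR bitfield and masked so it cannot spill
 * into neighbouring fields of the DSPFW register.
 */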
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
    u32 val;
    bool was_enabled;

    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
        was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
        I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
        POSTING_READ(FW_BLC_SELF_VLV);
    } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
        was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
        POSTING_READ(FW_BLC_SELF);
    } else if (IS_PINEVIEW(dev_priv)) {
        val = I915_READ(DSPFW3);
        was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
        if (enable)
            val |= PINEVIEW_SELF_REFRESH_EN;
        else
            val &= ~PINEVIEW_SELF_REFRESH_EN;
        I915_WRITE(DSPFW3, val);
        POSTING_READ(DSPFW3);
    } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
        was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
                       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
        I915_WRITE(FW_BLC_SELF, val);
        POSTING_READ(FW_BLC_SELF);
    } else if (IS_I915GM(dev_priv)) {
        /*
         * FIXME can't find a bit like this for 915G, and
         * yet it does have the related watermark in
         * FW_BLC_SELF. What's going on?
         */
        was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
        I915_WRITE(INSTPM, val);
        POSTING_READ(INSTPM);
    } else {
        return false;
    }

    trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

    DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
                  enableddisabled(enable),
                  enableddisabled(was_enabled));

    return was_enabled;
}
/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self-refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
    bool ret;

    mutex_lock(&dev_priv->wm.wm_mutex);
    ret = _intel_set_memory_cxsr(dev_priv, enable);
    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
        dev_priv->wm.vlv.cxsr = enable;
    else if (IS_G4X(dev_priv))
        dev_priv->wm.g4x.cxsr = enable;
    mutex_unlock(&dev_priv->wm.wm_mutex);

    return ret;
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
    ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
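/*
 * Worked example (illustrative register values): each FIFO start point is
 * a 9-bit number whose low 8 bits live in one register and whose bit 8
 * lives in another. With lo_shift = 8 and hi_shift = 4,
 * VLV_FIFO_START(0x2800, 0x10, 8, 4) takes bits 15:8 of the first value
 * (0x28 = 40) and bit 4 of the second value (1) as bit 8, yielding
 * 40 | (1 << 8) = 296.
 */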
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
    struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
    struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
    enum pipe pipe = crtc->pipe;
    int sprite0_start, sprite1_start;

    switch (pipe) {
        u32 dsparb, dsparb2, dsparb3;
    case PIPE_A:
        dsparb = I915_READ(DSPARB);
        dsparb2 = I915_READ(DSPARB2);
        sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
        sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
        break;
    case PIPE_B:
        dsparb = I915_READ(DSPARB);
        dsparb2 = I915_READ(DSPARB2);
        sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
        sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
        break;
    case PIPE_C:
        dsparb2 = I915_READ(DSPARB2);
        dsparb3 = I915_READ(DSPARB3);
        sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
        sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
        break;
    default:
        MISSING_CASE(pipe);
        return;
    }

    fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
    fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
    fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
    fifo_state->plane[PLANE_CURSOR] = 63;
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
    u32 dsparb = I915_READ(DSPARB);
    int size;

    size = dsparb & 0x7f;
    if (i9xx_plane == PLANE_B)
        size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

    DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
                  dsparb, plane_name(i9xx_plane), size);

    return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
    u32 dsparb = I915_READ(DSPARB);
    int size;

    size = dsparb & 0x1ff;
    if (i9xx_plane == PLANE_B)
        size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
    size >>= 1; /* Convert to cachelines */

    DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
                  dsparb, plane_name(i9xx_plane), size);

    return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
    u32 dsparb = I915_READ(DSPARB);
    int size;

    size = dsparb & 0x7f;
    size >>= 2; /* Convert to cachelines */

    DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
                  dsparb, plane_name(i9xx_plane), size);

    return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
    .fifo_size = PINEVIEW_DISPLAY_FIFO,
    .max_wm = PINEVIEW_MAX_WM,
    .default_wm = PINEVIEW_DFT_WM,
    .guard_size = PINEVIEW_GUARD_WM,
    .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
    .fifo_size = PINEVIEW_DISPLAY_FIFO,
    .max_wm = PINEVIEW_MAX_WM,
    .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
    .guard_size = PINEVIEW_GUARD_WM,
    .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
    .fifo_size = PINEVIEW_CURSOR_FIFO,
    .max_wm = PINEVIEW_CURSOR_MAX_WM,
    .default_wm = PINEVIEW_CURSOR_DFT_WM,
    .guard_size = PINEVIEW_CURSOR_GUARD_WM,
    .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
    .fifo_size = PINEVIEW_CURSOR_FIFO,
    .max_wm = PINEVIEW_CURSOR_MAX_WM,
    .default_wm = PINEVIEW_CURSOR_DFT_WM,
    .guard_size = PINEVIEW_CURSOR_GUARD_WM,
    .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
    .fifo_size = I965_CURSOR_FIFO,
    .max_wm = I965_CURSOR_MAX_WM,
    .default_wm = I965_CURSOR_DFT_WM,
    .guard_size = 2,
    .cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
    .fifo_size = I945_FIFO_SIZE,
    .max_wm = I915_MAX_WM,
    .default_wm = 1,
    .guard_size = 2,
    .cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
    .fifo_size = I915_FIFO_SIZE,
    .max_wm = I915_MAX_WM,
    .default_wm = 1,
    .guard_size = 2,
    .cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
    .fifo_size = I855GM_FIFO_SIZE,
    .max_wm = I915_MAX_WM,
    .default_wm = 1,
    .guard_size = 2,
    .cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
    .fifo_size = I855GM_FIFO_SIZE,
    .max_wm = I915_MAX_WM / 2,
    .default_wm = 1,
    .guard_size = 2,
    .cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
    .fifo_size = I830_FIFO_SIZE,
    .max_wm = I915_MAX_WM,
    .default_wm = 1,
    .guard_size = 2,
    .cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
                                     unsigned int cpp,
                                     unsigned int latency)
{
    u64 ret;

    ret = mul_u32_u32(pixel_rate, cpp * latency);
    ret = DIV_ROUND_UP_ULL(ret, 10000);

    return ret;
}
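/*
 * Worked example (illustrative numbers, not from any particular platform):
 * pixel_rate = 148500 kHz, cpp = 4, latency = 50 (i.e. 5 usec in 0.1 usec
 * units) gives 148500 * 4 * 50 / 10000 = 2970 bytes: the amount of data
 * the plane would drain from the FIFO while memory is waking up.
 */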
/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
                                     unsigned int htotal,
                                     unsigned int width,
                                     unsigned int cpp,
                                     unsigned int latency)
{
    u64 ret;

    /*
     * FIXME remove once all users are computing
     * watermarks in the correct place.
     */
    if (WARN_ON_ONCE(htotal == 0))
        htotal = 1;

    ret = (latency * pixel_rate) / (htotal * 10000);
    ret = (ret + 1) * width * cpp;

    return ret;
}
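/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz,
 * htotal = 2200, width = 1920, cpp = 4, latency = 350 (35 usec).
 * Whole lines scanned out during the latency:
 * (350 * 148500) / (2200 * 10000) = 2, so the watermark is
 * (2 + 1) * 1920 * 4 = 23040 bytes, i.e. two full lines plus the
 * partial line in flight.
 */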
/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
                                       const struct intel_watermark_params *wm,
                                       int fifo_size, int cpp,
                                       unsigned int latency_ns)
{
    int entries, wm_size;

    /*
     * Note: we need to make sure we don't overflow for various clock &
     * latency values.
     * clocks go from a few thousand to several hundred thousand.
     * latency is usually a few thousand
     */
    entries = intel_wm_method1(pixel_rate, cpp,
                               latency_ns / 100);
    entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
        wm->guard_size;
    DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

    wm_size = fifo_size - entries;
    DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

    /* Don't promote wm_size to unsigned... */
    if (wm_size > wm->max_wm)
        wm_size = wm->max_wm;
    if (wm_size <= 0)
        wm_size = wm->default_wm;

    /*
     * Bspec seems to indicate that the value shouldn't be lower than
     * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
     * Let's go for 8 which is the burst size since certain platforms
     * already use a hardcoded 8 (which is what the spec says should be
     * done).
     */
    wm_size = max(wm_size, 8);

    return wm_size;
}
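/*
 * Worked example (illustrative numbers): pixel_rate = 135000, cpp = 4,
 * latency_ns = 5000 and a 64 byte cacheline. Method 1 yields
 * 135000 * 4 * 50 / 10000 = 2700 bytes = 43 cachelines (rounded up);
 * with an assumed guard of 2 that is 45 entries, so a 512 entry FIFO
 * would give wm_size = 512 - 45 = 467, subject to the max_wm clamp above.
 */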
static bool is_disabling(int old, int new, int threshold)
{
    return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
    return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
    return dev_priv->wm.max_level + 1;
}
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
                                   const struct intel_plane_state *plane_state)
{
    struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

    /* FIXME check the 'enable' instead */
    if (!crtc_state->base.active)
        return false;

    /*
     * Treat cursor with fb as always visible since cursor updates
     * can happen faster than the vrefresh rate, and the current
     * watermark code doesn't handle that correctly. Cursor updates
     * which set/clear the fb or change the cursor size are going
     * to get throttled by intel_legacy_cursor_update() to work
     * around this problem with the watermark code.
     */
    if (plane->id == PLANE_CURSOR)
        return plane_state->base.fb != NULL;
    else
        return plane_state->base.visible;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
    struct intel_crtc *crtc, *enabled = NULL;

    for_each_intel_crtc(&dev_priv->drm, crtc) {
        if (intel_crtc_active(crtc)) {
            if (enabled)
                return NULL;
            enabled = crtc;
        }
    }

    return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
    struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
    struct intel_crtc *crtc;
    const struct cxsr_latency *latency;
    u32 reg;
    unsigned int wm;

    latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
                                     dev_priv->is_ddr3,
                                     dev_priv->fsb_freq,
                                     dev_priv->mem_freq);
    if (!latency) {
        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
        intel_set_memory_cxsr(dev_priv, false);
        return;
    }

    crtc = single_enabled_crtc(dev_priv);
    if (crtc) {
        const struct drm_display_mode *adjusted_mode =
            &crtc->config->base.adjusted_mode;
        const struct drm_framebuffer *fb =
            crtc->base.primary->state->fb;
        int cpp = fb->format->cpp[0];
        int clock = adjusted_mode->crtc_clock;

        /* Display SR */
        wm = intel_calculate_wm(clock, &pineview_display_wm,
                                pineview_display_wm.fifo_size,
                                cpp, latency->display_sr);
        reg = I915_READ(DSPFW1);
        reg &= ~DSPFW_SR_MASK;
        reg |= FW_WM(wm, SR);
        I915_WRITE(DSPFW1, reg);
        DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

        /* cursor SR */
        wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                pineview_display_wm.fifo_size,
                                4, latency->cursor_sr);
        reg = I915_READ(DSPFW3);
        reg &= ~DSPFW_CURSOR_SR_MASK;
        reg |= FW_WM(wm, CURSOR_SR);
        I915_WRITE(DSPFW3, reg);

        /* Display HPLL off SR */
        wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                pineview_display_hplloff_wm.fifo_size,
                                cpp, latency->display_hpll_disable);
        reg = I915_READ(DSPFW3);
        reg &= ~DSPFW_HPLL_SR_MASK;
        reg |= FW_WM(wm, HPLL_SR);
        I915_WRITE(DSPFW3, reg);

        /* cursor HPLL off SR */
        wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                pineview_display_hplloff_wm.fifo_size,
                                4, latency->cursor_hpll_disable);
        reg = I915_READ(DSPFW3);
        reg &= ~DSPFW_HPLL_CURSOR_MASK;
        reg |= FW_WM(wm, HPLL_CURSOR);
        I915_WRITE(DSPFW3, reg);
        DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

        intel_set_memory_cxsr(dev_priv, true);
    } else {
        intel_set_memory_cxsr(dev_priv, false);
    }
}
/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
    int tlb_miss = fifo_size * 64 - width * cpp * 8;

    return max(0, tlb_miss);
}
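/*
 * Worked example (illustrative numbers): with fifo_size = 511 cachelines
 * the buffer holds 511 * 64 = 32704 bytes. At width = 800, cpp = 4,
 * eight whole lines are 800 * 4 * 8 = 25600 bytes, so the watermark is
 * bumped by 32704 - 25600 = 7104 bytes. At width = 1920, cpp = 4, eight
 * lines no longer fit (61440 > 32704) and no adjustment is applied.
 */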
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct g4x_wm_values *wm)
{
    enum pipe pipe;

    for_each_pipe(dev_priv, pipe)
        trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

    I915_WRITE(DSPFW1,
               FW_WM(wm->sr.plane, SR) |
               FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
               FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
               FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
    I915_WRITE(DSPFW2,
               (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
               FW_WM(wm->sr.fbc, FBC_SR) |
               FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
               FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
               FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
               FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
    I915_WRITE(DSPFW3,
               (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
               FW_WM(wm->sr.cursor, CURSOR_SR) |
               FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
               FW_WM(wm->hpll.plane, HPLL_SR));

    POSTING_READ(DSPFW1);
}
#define FW_WM_VLV(value, plane) \
    (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct vlv_wm_values *wm)
{
    enum pipe pipe;

    for_each_pipe(dev_priv, pipe) {
        trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

        I915_WRITE(VLV_DDL(pipe),
                   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
                   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
                   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
                   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
    }

    /*
     * Zero the (unused) WM1 watermarks, and also clear all the
     * high order bits so that there are no out of bounds values
     * present in the registers during the reprogramming.
     */
    I915_WRITE(DSPHOWM, 0);
    I915_WRITE(DSPHOWM1, 0);
    I915_WRITE(DSPFW4, 0);
    I915_WRITE(DSPFW5, 0);
    I915_WRITE(DSPFW6, 0);

    I915_WRITE(DSPFW1,
               FW_WM(wm->sr.plane, SR) |
               FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
               FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
               FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
    I915_WRITE(DSPFW2,
               FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
               FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
               FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
    I915_WRITE(DSPFW3,
               FW_WM(wm->sr.cursor, CURSOR_SR));

    if (IS_CHERRYVIEW(dev_priv)) {
        I915_WRITE(DSPFW7_CHV,
                   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
                   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
        I915_WRITE(DSPFW8_CHV,
                   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
                   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
        I915_WRITE(DSPFW9_CHV,
                   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
                   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
        I915_WRITE(DSPHOWM,
                   FW_WM(wm->sr.plane >> 9, SR_HI) |
                   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
                   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
                   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
    } else {
        I915_WRITE(DSPFW7,
                   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
                   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
        I915_WRITE(DSPHOWM,
                   FW_WM(wm->sr.plane >> 9, SR_HI) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
    }

    POSTING_READ(DSPFW1);
}
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
    /* all latencies in usec */
    dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
    dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
    dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

    dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
    /*
     * DSPCNTR[13] supposedly controls whether the
     * primary plane can use the FIFO space otherwise
     * reserved for the sprite plane. It's not 100% clear
     * what the actual FIFO size is, but it looks like we
     * can happily set both primary and sprite watermarks
     * up to 127 cachelines. So that would seem to mean
     * that either DSPCNTR[13] doesn't do anything, or that
     * the total FIFO is >= 256 cachelines in size. Either
     * way, we don't seem to have to worry about this
     * repartitioning as the maximum watermark value the
     * register can hold for each plane is lower than the
     * minimum FIFO size.
     */
    switch (plane_id) {
    case PLANE_CURSOR:
        return 63;
    case PLANE_PRIMARY:
        return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
    case PLANE_SPRITE0:
        return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
    default:
        MISSING_CASE(plane_id);
        return 0;
    }
}
static int g4x_fbc_fifo_size(int level)
{
    switch (level) {
    case G4X_WM_LEVEL_SR:
        return 7;
    case G4X_WM_LEVEL_HPLL:
        return 15;
    default:
        MISSING_CASE(level);
        return 0;
    }
}
static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state,
                          int level)
{
    struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
    struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
    const struct drm_display_mode *adjusted_mode =
        &crtc_state->base.adjusted_mode;
    unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
    unsigned int clock, htotal, cpp, width, wm;

    if (latency == 0)
        return USHRT_MAX;

    if (!intel_wm_plane_visible(crtc_state, plane_state))
        return 0;

    /*
     * Not 100% sure which way ELK should go here as the
     * spec only says CL/CTG should assume 32bpp and BW
     * doesn't need to. But as these things followed the
     * mobile vs. desktop lines on gen3 as well, let's
     * assume ELK doesn't need this.
     *
     * The spec also fails to list such a restriction for
     * the HPLL watermark, which seems a little strange.
     * Let's use 32bpp for the HPLL watermark as well.
     */
    if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
        level != G4X_WM_LEVEL_NORMAL)
        cpp = 4;
    else
        cpp = plane_state->base.fb->format->cpp[0];

    clock = adjusted_mode->crtc_clock;
    htotal = adjusted_mode->crtc_htotal;

    if (plane->id == PLANE_CURSOR)
        width = plane_state->base.crtc_w;
    else
        width = drm_rect_width(&plane_state->base.dst);

    if (plane->id == PLANE_CURSOR) {
        wm = intel_wm_method2(clock, htotal, width, cpp, latency);
    } else if (plane->id == PLANE_PRIMARY &&
               level == G4X_WM_LEVEL_NORMAL) {
        wm = intel_wm_method1(clock, cpp, latency);
    } else {
        unsigned int small, large;

        small = intel_wm_method1(clock, cpp, latency);
        large = intel_wm_method2(clock, htotal, width, cpp, latency);

        wm = min(small, large);
    }

    wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
                          width, cpp);

    wm = DIV_ROUND_UP(wm, 64) + 2;

    return min_t(unsigned int, wm, USHRT_MAX);
}
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
                                 int level, enum plane_id plane_id, u16 value)
{
    struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
    bool dirty = false;

    for (; level < intel_wm_num_levels(dev_priv); level++) {
        struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

        dirty |= raw->plane[plane_id] != value;
        raw->plane[plane_id] = value;
    }

    return dirty;
}
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
                               int level, u16 value)
{
    struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
    bool dirty = false;

    /* NORMAL level doesn't have an FBC watermark */
    level = max(level, G4X_WM_LEVEL_SR);

    for (; level < intel_wm_num_levels(dev_priv); level++) {
        struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

        dirty |= raw->fbc != value;
        raw->fbc = value;
    }

    return dirty;
}
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
                              const struct intel_plane_state *pstate,
                              u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
                                     const struct intel_plane_state *plane_state)
{
    struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
    int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
    enum plane_id plane_id = plane->id;
    bool dirty = false;
    int level;

    if (!intel_wm_plane_visible(crtc_state, plane_state)) {
        dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
        if (plane_id == PLANE_PRIMARY)
            dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
        goto out;
    }

    for (level = 0; level < num_levels; level++) {
        struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
        int wm, max_wm;

        wm = g4x_compute_wm(crtc_state, plane_state, level);
        max_wm = g4x_plane_fifo_size(plane_id, level);

        if (wm > max_wm)
            break;

        dirty |= raw->plane[plane_id] != wm;
        raw->plane[plane_id] = wm;

        if (plane_id != PLANE_PRIMARY ||
            level == G4X_WM_LEVEL_NORMAL)
            continue;

        wm = ilk_compute_fbc_wm(crtc_state, plane_state,
                                raw->plane[plane_id]);
        max_wm = g4x_fbc_fifo_size(level);

        /*
         * FBC wm is not mandatory as we
         * can always just disable its use.
         */
        if (wm > max_wm)
            wm = USHRT_MAX;

        dirty |= raw->fbc != wm;
        raw->fbc = wm;
    }

    /* mark watermarks as invalid */
    dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

    if (plane_id == PLANE_PRIMARY)
        dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

out:
    if (dirty) {
        DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
                      plane->base.name,
                      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
                      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
                      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

        if (plane_id == PLANE_PRIMARY)
            DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
                          crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
                          crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
    }

    return dirty;
}
static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                      enum plane_id plane_id, int level)
{
    const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

    return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                     int level)
{
    struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

    if (level > dev_priv->wm.max_level)
        return false;

    return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
        g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
        g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
                               struct g4x_wm_state *wm_state, int level)
{
    if (level <= G4X_WM_LEVEL_NORMAL) {
        enum plane_id plane_id;

        for_each_plane_id_on_crtc(crtc, plane_id)
            wm_state->wm.plane[plane_id] = USHRT_MAX;
    }

    if (level <= G4X_WM_LEVEL_SR) {
        wm_state->cxsr = false;
        wm_state->sr.cursor = USHRT_MAX;
        wm_state->sr.plane = USHRT_MAX;
        wm_state->sr.fbc = USHRT_MAX;
    }

    if (level <= G4X_WM_LEVEL_HPLL) {
        wm_state->hpll_en = false;
        wm_state->hpll.cursor = USHRT_MAX;
        wm_state->hpll.plane = USHRT_MAX;
        wm_state->hpll.fbc = USHRT_MAX;
    }
}
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
    struct intel_atomic_state *state =
        to_intel_atomic_state(crtc_state->base.state);
    struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
    int num_active_planes = hweight32(crtc_state->active_planes &
                                      ~BIT(PLANE_CURSOR));
    const struct g4x_pipe_wm *raw;
    const struct intel_plane_state *old_plane_state;
    const struct intel_plane_state *new_plane_state;
    struct intel_plane *plane;
    enum plane_id plane_id;
    int i, level;
    unsigned int dirty = 0;

    for_each_oldnew_intel_plane_in_state(state, plane,
                                         old_plane_state,
                                         new_plane_state, i) {
        if (new_plane_state->base.crtc != &crtc->base &&
            old_plane_state->base.crtc != &crtc->base)
            continue;

        if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
            dirty |= BIT(plane->id);
    }

    if (!dirty)
        return 0;

    level = G4X_WM_LEVEL_NORMAL;
    if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
        goto out;

    raw = &crtc_state->wm.g4x.raw[level];
    for_each_plane_id_on_crtc(crtc, plane_id)
        wm_state->wm.plane[plane_id] = raw->plane[plane_id];

    level = G4X_WM_LEVEL_SR;

    if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
        goto out;

    raw = &crtc_state->wm.g4x.raw[level];
    wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
    wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
    wm_state->sr.fbc = raw->fbc;

    wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

    level = G4X_WM_LEVEL_HPLL;

    if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
        goto out;

    raw = &crtc_state->wm.g4x.raw[level];
    wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
    wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
    wm_state->hpll.fbc = raw->fbc;

    wm_state->hpll_en = wm_state->cxsr;

    level++;

out:
    if (level == G4X_WM_LEVEL_NORMAL)
        return -EINVAL;

    /* invalidate the higher levels */
    g4x_invalidate_wms(crtc, wm_state, level);

    /*
     * Determine if the FBC watermark(s) can be used. If
     * this isn't the case we prefer to disable the FBC
     * watermark(s) rather than disable the SR/HPLL
     * level(s) entirely.
     */
    wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

    if (level >= G4X_WM_LEVEL_SR &&
        wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
        wm_state->fbc_en = false;
    else if (level >= G4X_WM_LEVEL_HPLL &&
             wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
        wm_state->fbc_en = false;

    return 0;
}
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
    struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
    const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
    struct intel_atomic_state *intel_state =
        to_intel_atomic_state(new_crtc_state->base.state);
    const struct intel_crtc_state *old_crtc_state =
        intel_atomic_get_old_crtc_state(intel_state, crtc);
    const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
    enum plane_id plane_id;

    if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
        *intermediate = *optimal;

        intermediate->cxsr = false;
        intermediate->hpll_en = false;
        goto out;
    }

    intermediate->cxsr = optimal->cxsr && active->cxsr &&
        !new_crtc_state->disable_cxsr;
    intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
        !new_crtc_state->disable_cxsr;
    intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

    for_each_plane_id_on_crtc(crtc, plane_id) {
        intermediate->wm.plane[plane_id] =
            max(optimal->wm.plane[plane_id],
                active->wm.plane[plane_id]);

        WARN_ON(intermediate->wm.plane[plane_id] >
                g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
    }

    intermediate->sr.plane = max(optimal->sr.plane,
                                 active->sr.plane);
    intermediate->sr.cursor = max(optimal->sr.cursor,
                                  active->sr.cursor);
    intermediate->sr.fbc = max(optimal->sr.fbc,
                               active->sr.fbc);

    intermediate->hpll.plane = max(optimal->hpll.plane,
                                   active->hpll.plane);
    intermediate->hpll.cursor = max(optimal->hpll.cursor,
                                    active->hpll.cursor);
    intermediate->hpll.fbc = max(optimal->hpll.fbc,
                                 active->hpll.fbc);

    WARN_ON((intermediate->sr.plane >
             g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
             intermediate->sr.cursor >
             g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
            intermediate->cxsr);
    WARN_ON((intermediate->sr.plane >
             g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
             intermediate->sr.cursor >
             g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
            intermediate->hpll_en);

    WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
            intermediate->fbc_en && intermediate->cxsr);
    WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
            intermediate->fbc_en && intermediate->hpll_en);

out:
    /*
     * If our intermediate WM are identical to the final WM, then we can
     * omit the post-vblank programming; only update if it's different.
     */
    if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
        new_crtc_state->wm.need_postvbl_update = true;

    return 0;
}
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
                         struct g4x_wm_values *wm)
{
    struct intel_crtc *crtc;
    int num_active_crtcs = 0;

    wm->cxsr = true;
    wm->hpll_en = true;
    wm->fbc_en = true;

    for_each_intel_crtc(&dev_priv->drm, crtc) {
        const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

        if (!crtc->active)
            continue;

        if (!wm_state->cxsr)
            wm->cxsr = false;
        if (!wm_state->hpll_en)
            wm->hpll_en = false;
        if (!wm_state->fbc_en)
            wm->fbc_en = false;

        num_active_crtcs++;
    }

    if (num_active_crtcs != 1) {
        wm->cxsr = false;
        wm->hpll_en = false;
        wm->fbc_en = false;
    }

    for_each_intel_crtc(&dev_priv->drm, crtc) {
        const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
        enum pipe pipe = crtc->pipe;

        wm->pipe[pipe] = wm_state->wm;
        if (crtc->active && wm->cxsr)
            wm->sr = wm_state->sr;
        if (crtc->active && wm->hpll_en)
            wm->hpll = wm_state->hpll;
    }
}
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
    struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
    struct g4x_wm_values new_wm = {};

    g4x_merge_wm(dev_priv, &new_wm);

    if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
        return;

    if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
        _intel_set_memory_cxsr(dev_priv, false);

    g4x_write_wm_values(dev_priv, &new_wm);

    if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
        _intel_set_memory_cxsr(dev_priv, true);

    *old_wm = new_wm;
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
                                   struct intel_crtc_state *crtc_state)
{
    struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
    struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

    mutex_lock(&dev_priv->wm.wm_mutex);
    crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
    g4x_program_watermarks(dev_priv);
    mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
                                    struct intel_crtc_state *crtc_state)
{
    struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

    if (!crtc_state->wm.need_postvbl_update)
        return;

    mutex_lock(&dev_priv->wm.wm_mutex);
    intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
    g4x_program_watermarks(dev_priv);
    mutex_unlock(&dev_priv->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
                                   unsigned int htotal,
                                   unsigned int width,
                                   unsigned int cpp,
                                   unsigned int latency)
{
    unsigned int ret;

    ret = intel_wm_method2(pixel_rate, htotal,
                           width, cpp, latency);
    ret = DIV_ROUND_UP(ret, 64);

    return ret;
}
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
    /* all latencies in usec */
    dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

    dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

    if (IS_CHERRYVIEW(dev_priv)) {
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

        dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
    }
}
static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
                                const struct intel_plane_state *plane_state,
                                int level)
{
    struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
    struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
    const struct drm_display_mode *adjusted_mode =
        &crtc_state->base.adjusted_mode;
    unsigned int clock, htotal, cpp, width, wm;

    if (dev_priv->wm.pri_latency[level] == 0)
        return USHRT_MAX;

    if (!intel_wm_plane_visible(crtc_state, plane_state))
        return 0;

    cpp = plane_state->base.fb->format->cpp[0];
    clock = adjusted_mode->crtc_clock;
    htotal = adjusted_mode->crtc_htotal;
    width = crtc_state->pipe_src_w;

    if (plane->id == PLANE_CURSOR) {
        /*
         * FIXME the formula gives values that are
         * too big for the cursor FIFO, and hence we
         * would never be able to use cursors. For
         * now just hardcode the watermark.
         */
        wm = 63;
    } else {
        wm = vlv_wm_method2(clock, htotal, width, cpp,
                            dev_priv->wm.pri_latency[level] * 10);
    }

    return min_t(unsigned int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
    return (active_planes & (BIT(PLANE_SPRITE0) |
                             BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
    const struct g4x_pipe_wm *raw =
        &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
    struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
    unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
    int num_active_planes = hweight32(active_planes);
    const int fifo_size = 511;
    int fifo_extra, fifo_left = fifo_size;
    int sprite0_fifo_extra = 0;
    unsigned int total_rate;
    enum plane_id plane_id;

    /*
     * When enabling sprite0 after sprite1 has already been enabled
     * we tend to get an underrun unless sprite0 already has some
     * FIFO space allocated. Hence we always allocate at least one
     * cacheline for sprite0 whenever sprite1 is enabled.
     *
     * All other plane enable sequences appear immune to this problem.
     */
    if (vlv_need_sprite0_fifo_workaround(active_planes))
        sprite0_fifo_extra = 1;

    total_rate = raw->plane[PLANE_PRIMARY] +
        raw->plane[PLANE_SPRITE0] +
        raw->plane[PLANE_SPRITE1] +
        sprite0_fifo_extra;

    if (total_rate > fifo_size)
        return -EINVAL;

    if (total_rate == 0)
        total_rate = 1;

    for_each_plane_id_on_crtc(crtc, plane_id) {
        unsigned int rate;

        if ((active_planes & BIT(plane_id)) == 0) {
            fifo_state->plane[plane_id] = 0;
            continue;
        }

        rate = raw->plane[plane_id];
        fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
        fifo_left -= fifo_state->plane[plane_id];
    }

    fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
    fifo_left -= sprite0_fifo_extra;

    fifo_state->plane[PLANE_CURSOR] = 63;

    fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

    /* spread the remainder evenly */
    for_each_plane_id_on_crtc(crtc, plane_id) {
        int plane_extra;

        if (fifo_left == 0)
            break;

        if ((active_planes & BIT(plane_id)) == 0)
            continue;

        plane_extra = min(fifo_extra, fifo_left);
        fifo_state->plane[plane_id] += plane_extra;
        fifo_left -= plane_extra;
    }

    WARN_ON(active_planes != 0 && fifo_left != 0);

    /* give it all to the first plane if none are active */
    if (active_planes == 0) {
        WARN_ON(fifo_left != fifo_size);
        fifo_state->plane[PLANE_PRIMARY] = fifo_left;
    }

    return 0;
}
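/*
 * Worked example (illustrative rates): with only the primary and sprite0
 * active at raw rates 120 and 40, total_rate = 160. The proportional
 * split of the 511 entry FIFO is 511 * 120 / 160 = 383 and
 * 511 * 40 / 160 = 127, leaving 1 entry, which the remainder loop then
 * hands to the primary plane (384 + 127 = 511). The cursor always keeps
 * its fixed 63 entries on top of this.
 */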
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
                               struct vlv_wm_state *wm_state, int level)
{
    struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

    for (; level < intel_wm_num_levels(dev_priv); level++) {
        enum plane_id plane_id;

        for_each_plane_id_on_crtc(crtc, plane_id)
            wm_state->wm[level].plane[plane_id] = USHRT_MAX;

        wm_state->sr[level].cursor = USHRT_MAX;
        wm_state->sr[level].plane = USHRT_MAX;
    }
}

static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
    if (wm > fifo_size)
        return USHRT_MAX;
    else
        return fifo_size - wm;
}
/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
                                 int level, enum plane_id plane_id, u16 value)
{
    struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
    int num_levels = intel_wm_num_levels(dev_priv);
    bool dirty = false;

    for (; level < num_levels; level++) {
        struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

        dirty |= raw->plane[plane_id] != value;
        raw->plane[plane_id] = value;
    }

    return dirty;
}
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
                                     const struct intel_plane_state *plane_state)
{
    struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
    enum plane_id plane_id = plane->id;
    int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
    int level;
    bool dirty = false;

    if (!intel_wm_plane_visible(crtc_state, plane_state)) {
        dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
        goto out;
    }

    for (level = 0; level < num_levels; level++) {
        struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
        int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
        int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

        if (wm > max_wm)
            break;

        dirty |= raw->plane[plane_id] != wm;
        raw->plane[plane_id] = wm;
    }

    /* mark all higher levels as invalid */
    dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
    if (dirty)
        DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
                      plane->base.name,
                      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
                      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
                      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

    return dirty;
}
static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                      enum plane_id plane_id, int level)
{
    const struct g4x_pipe_wm *raw =
        &crtc_state->wm.vlv.raw[level];
    const struct vlv_fifo_state *fifo_state =
        &crtc_state->wm.vlv.fifo_state;

    return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
    return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
        vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
        vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
        vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
    struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
    struct intel_atomic_state *state =
        to_intel_atomic_state(crtc_state->base.state);
    struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
    const struct vlv_fifo_state *fifo_state =
        &crtc_state->wm.vlv.fifo_state;
    int num_active_planes = hweight32(crtc_state->active_planes &
                                      ~BIT(PLANE_CURSOR));
    bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
    const struct intel_plane_state *old_plane_state;
    const struct intel_plane_state *new_plane_state;
    struct intel_plane *plane;
    enum plane_id plane_id;
    int level, ret, i;
    unsigned int dirty = 0;

    for_each_oldnew_intel_plane_in_state(state, plane,
                                         old_plane_state,
                                         new_plane_state, i) {
        if (new_plane_state->base.crtc != &crtc->base &&
            old_plane_state->base.crtc != &crtc->base)
            continue;

        if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
            dirty |= BIT(plane->id);
    }

    /*
     * DSPARB registers may have been reset due to the
     * power well being turned off. Make sure we restore
     * them to a consistent state even if no primary/sprite
     * planes are initially active.
     */
    if (needs_modeset)
        crtc_state->fifo_changed = true;

    if (!dirty)
        return 0;

    /* cursor changes don't warrant a FIFO recompute */
    if (dirty & ~BIT(PLANE_CURSOR)) {
        const struct intel_crtc_state *old_crtc_state =
            intel_atomic_get_old_crtc_state(state, crtc);
        const struct vlv_fifo_state *old_fifo_state =
            &old_crtc_state->wm.vlv.fifo_state;

        ret = vlv_compute_fifo(crtc_state);
        if (ret)
            return ret;

        if (needs_modeset ||
            memcmp(old_fifo_state, fifo_state,
                   sizeof(*fifo_state)) != 0)
            crtc_state->fifo_changed = true;
    }

    /* initially allow all levels */
    wm_state->num_levels = intel_wm_num_levels(dev_priv);
    /*
     * Note that enabling cxsr with no primary/sprite planes
     * enabled can wedge the pipe. Hence we only allow cxsr
     * with exactly one enabled primary/sprite plane.
     */
    wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

    for (level = 0; level < wm_state->num_levels; level++) {
        const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
        const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

        if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
            break;

        for_each_plane_id_on_crtc(crtc, plane_id) {
            wm_state->wm[level].plane[plane_id] =
                vlv_invert_wm_value(raw->plane[plane_id],
                                    fifo_state->plane[plane_id]);
        }

        wm_state->sr[level].plane =
            vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
                                     raw->plane[PLANE_SPRITE0],
                                     raw->plane[PLANE_SPRITE1]),
                                sr_fifo_size);

        wm_state->sr[level].cursor =
            vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
                                63);
    }

    if (level == 0)
        return -EINVAL;

    /* limit to only levels we can actually handle */
    wm_state->num_levels = level;

    /* invalidate the higher levels */
    vlv_invalidate_wms(crtc, wm_state, level);

    return 0;
}
#define VLV_FIFO(plane, value) \
    (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
                                   struct intel_crtc_state *crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
    struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
    const struct vlv_fifo_state *fifo_state =
        &crtc_state->wm.vlv.fifo_state;
    int sprite0_start, sprite1_start, fifo_size;

    if (!crtc_state->fifo_changed)
        return;

    sprite0_start = fifo_state->plane[PLANE_PRIMARY];
    sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
    fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

    WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
    WARN_ON(fifo_size != 511);

    trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

    /*
     * uncore.lock serves a double purpose here. It allows us to
     * use the less expensive I915_{READ,WRITE}_FW() functions, and
     * it protects the DSPARB registers from getting clobbered by
     * parallel updates from multiple pipes.
     *
     * intel_pipe_update_start() has already disabled interrupts
     * for us, so a plain spin_lock() is sufficient here.
     */
    spin_lock(&dev_priv->uncore.lock);

    switch (crtc->pipe) {
        u32 dsparb, dsparb2, dsparb3;
    case PIPE_A:
        dsparb = I915_READ_FW(DSPARB);
        dsparb2 = I915_READ_FW(DSPARB2);

        dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
                    VLV_FIFO(SPRITEB, 0xff));
        dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
                   VLV_FIFO(SPRITEB, sprite1_start));

        dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
                     VLV_FIFO(SPRITEB_HI, 0x1));
        dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
                    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

        I915_WRITE_FW(DSPARB, dsparb);
        I915_WRITE_FW(DSPARB2, dsparb2);
        break;
    case PIPE_B:
        dsparb = I915_READ_FW(DSPARB);
        dsparb2 = I915_READ_FW(DSPARB2);

        dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
                    VLV_FIFO(SPRITED, 0xff));
        dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
                   VLV_FIFO(SPRITED, sprite1_start));

        dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
                     VLV_FIFO(SPRITED_HI, 0xff));
        dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
                    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

        I915_WRITE_FW(DSPARB, dsparb);
        I915_WRITE_FW(DSPARB2, dsparb2);
        break;
    case PIPE_C:
        dsparb3 = I915_READ_FW(DSPARB3);
        dsparb2 = I915_READ_FW(DSPARB2);

        dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
                     VLV_FIFO(SPRITEF, 0xff));
        dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
                    VLV_FIFO(SPRITEF, sprite1_start));

        dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
                     VLV_FIFO(SPRITEF_HI, 0xff));
        dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
                    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

        I915_WRITE_FW(DSPARB3, dsparb3);
        I915_WRITE_FW(DSPARB2, dsparb2);
        break;
    default:
        break;
    }

    POSTING_READ_FW(DSPARB);

    spin_unlock(&dev_priv->uncore.lock);
}
2043 static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
2045 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
2046 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2047 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2048 struct intel_atomic_state *intel_state =
2049 to_intel_atomic_state(new_crtc_state->base.state);
2050 const struct intel_crtc_state *old_crtc_state =
2051 intel_atomic_get_old_crtc_state(intel_state, crtc);
2052 const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
2055 if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
2056 *intermediate = *optimal;
2058 intermediate->cxsr = false;
2062 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2063 intermediate->cxsr = optimal->cxsr && active->cxsr &&
2064 !new_crtc_state->disable_cxsr;
2066 for (level = 0; level < intermediate->num_levels; level++) {
2067 enum plane_id plane_id;
2069 for_each_plane_id_on_crtc(crtc, plane_id) {
2070 intermediate->wm[level].plane[plane_id] =
2071 min(optimal->wm[level].plane[plane_id],
2072 active->wm[level].plane[plane_id]);
2075 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2076 active->sr[level].plane);
2077 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2078 active->sr[level].cursor);
2081 vlv_invalidate_wms(crtc, intermediate, level);
2085 * If our intermediate WM are identical to the final WM, then we can
2086 * omit the post-vblank programming; only update if it's different.
2088 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2089 new_crtc_state->wm.need_postvbl_update = true;
2094 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2095 struct vlv_wm_values *wm)
2097 struct intel_crtc *crtc;
2098 int num_active_crtcs = 0;
2100 wm->level = dev_priv->wm.max_level;
2103 for_each_intel_crtc(&dev_priv->drm, crtc) {
2104 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2109 if (!wm_state->cxsr)
2113 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2116 if (num_active_crtcs != 1)
2119 if (num_active_crtcs > 1)
2120 wm->level = VLV_WM_LEVEL_PM2;
2122 for_each_intel_crtc(&dev_priv->drm, crtc) {
2123 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2124 enum pipe pipe = crtc->pipe;
2126 wm->pipe[pipe] = wm_state->wm[wm->level];
2127 if (crtc->active && wm->cxsr)
2128 wm->sr = wm_state->sr[wm->level];
2130 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2131 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2132 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2133 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2137 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2139 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2140 struct vlv_wm_values new_wm = {};
2142 vlv_merge_wm(dev_priv, &new_wm);
2144 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2147 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2148 chv_set_memory_dvfs(dev_priv, false);
2150 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2151 chv_set_memory_pm5(dev_priv, false);
2153 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2154 _intel_set_memory_cxsr(dev_priv, false);
2156 vlv_write_wm_values(dev_priv, &new_wm);
2158 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2159 _intel_set_memory_cxsr(dev_priv, true);
2161 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2162 chv_set_memory_pm5(dev_priv, true);
2164 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2165 chv_set_memory_dvfs(dev_priv, true);
2170 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2171 struct intel_crtc_state *crtc_state)
2173 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2174 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2176 mutex_lock(&dev_priv->wm.wm_mutex);
2177 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2178 vlv_program_watermarks(dev_priv);
2179 mutex_unlock(&dev_priv->wm.wm_mutex);
2182 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2183 struct intel_crtc_state *crtc_state)
2185 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2186 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2188 if (!crtc_state->wm.need_postvbl_update)
2191 mutex_lock(&dev_priv->wm.wm_mutex);
2192 intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2193 vlv_program_watermarks(dev_priv);
2194 mutex_unlock(&dev_priv->wm.wm_mutex);
2197 static void i965_update_wm(struct intel_crtc *unused_crtc)
2199 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2200 struct intel_crtc *crtc;
2205 /* Calc sr entries for one plane configs */
2206 crtc = single_enabled_crtc(dev_priv);
2208 /* self-refresh has much higher latency */
2209 static const int sr_latency_ns = 12000;
2210 const struct drm_display_mode *adjusted_mode =
2211 &crtc->config->base.adjusted_mode;
2212 const struct drm_framebuffer *fb =
2213 crtc->base.primary->state->fb;
2214 int clock = adjusted_mode->crtc_clock;
2215 int htotal = adjusted_mode->crtc_htotal;
2216 int hdisplay = crtc->config->pipe_src_w;
2217 int cpp = fb->format->cpp[0];
2220 entries = intel_wm_method2(clock, htotal,
2221 hdisplay, cpp, sr_latency_ns / 100);
2222 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2223 srwm = I965_FIFO_SIZE - entries;
2227 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
2230 entries = intel_wm_method2(clock, htotal,
2231 crtc->base.cursor->state->crtc_w, 4,
2232 sr_latency_ns / 100);
2233 entries = DIV_ROUND_UP(entries,
2234 i965_cursor_wm_info.cacheline_size) +
2235 i965_cursor_wm_info.guard_size;
2237 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2238 if (cursor_sr > i965_cursor_wm_info.max_wm)
2239 cursor_sr = i965_cursor_wm_info.max_wm;
2241 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
2242 "cursor %d\n", srwm, cursor_sr);
2244 cxsr_enabled = true;
2246 cxsr_enabled = false;
2247 /* Turn off self refresh if both pipes are enabled */
2248 intel_set_memory_cxsr(dev_priv, false);
2251 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2254 /* 965 has limitations... */
2255 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2259 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2260 FW_WM(8, PLANEC_OLD));
2261 /* update cursor SR watermark */
2262 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2265 intel_set_memory_cxsr(dev_priv, true);
2270 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2272 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2273 const struct intel_watermark_params *wm_info;
2278 int planea_wm, planeb_wm;
2279 struct intel_crtc *crtc, *enabled = NULL;
2281 if (IS_I945GM(dev_priv))
2282 wm_info = &i945_wm_info;
2283 else if (!IS_GEN(dev_priv, 2))
2284 wm_info = &i915_wm_info;
2286 wm_info = &i830_a_wm_info;
2288 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
2289 crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
2290 if (intel_crtc_active(crtc)) {
2291 const struct drm_display_mode *adjusted_mode =
2292 &crtc->config->base.adjusted_mode;
2293 const struct drm_framebuffer *fb =
2294 crtc->base.primary->state->fb;
2297 if (IS_GEN(dev_priv, 2))
2300 cpp = fb->format->cpp[0];
2302 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2303 wm_info, fifo_size, cpp,
2304 pessimal_latency_ns);
2307 planea_wm = fifo_size - wm_info->guard_size;
2308 if (planea_wm > (long)wm_info->max_wm)
2309 planea_wm = wm_info->max_wm;
2312 if (IS_GEN(dev_priv, 2))
2313 wm_info = &i830_bc_wm_info;
2315 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
2316 crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
2317 if (intel_crtc_active(crtc)) {
2318 const struct drm_display_mode *adjusted_mode =
2319 &crtc->config->base.adjusted_mode;
2320 const struct drm_framebuffer *fb =
2321 crtc->base.primary->state->fb;
2324 if (IS_GEN(dev_priv, 2))
2327 cpp = fb->format->cpp[0];
2329 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2330 wm_info, fifo_size, cpp,
2331 pessimal_latency_ns);
2332 if (enabled == NULL)
2337 planeb_wm = fifo_size - wm_info->guard_size;
2338 if (planeb_wm > (long)wm_info->max_wm)
2339 planeb_wm = wm_info->max_wm;
2342 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2344 if (IS_I915GM(dev_priv) && enabled) {
2345 struct drm_i915_gem_object *obj;
2347 obj = intel_fb_obj(enabled->base.primary->state->fb);
2349 /* self-refresh seems busted with untiled */
2350 if (!i915_gem_object_is_tiled(obj))
2355 * Overlay gets an aggressive default since video jitter is bad.
2359 /* Play safe and disable self-refresh before adjusting watermarks. */
2360 intel_set_memory_cxsr(dev_priv, false);
2362 /* Calc sr entries for one plane configs */
2363 if (HAS_FW_BLC(dev_priv) && enabled) {
2364 /* self-refresh has much higher latency */
2365 static const int sr_latency_ns = 6000;
2366 const struct drm_display_mode *adjusted_mode =
2367 &enabled->config->base.adjusted_mode;
2368 const struct drm_framebuffer *fb =
2369 enabled->base.primary->state->fb;
2370 int clock = adjusted_mode->crtc_clock;
2371 int htotal = adjusted_mode->crtc_htotal;
2372 int hdisplay = enabled->config->pipe_src_w;
2376 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2379 cpp = fb->format->cpp[0];
2381 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2382 sr_latency_ns / 100);
2383 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2384 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
2385 srwm = wm_info->fifo_size - entries;
2389 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2390 I915_WRITE(FW_BLC_SELF,
2391 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2393 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2396 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2397 planea_wm, planeb_wm, cwm, srwm);
2399 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2400 fwater_hi = (cwm & 0x1f);
2402 /* Set request length to 8 cachelines per fetch */
2403 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2404 fwater_hi = fwater_hi | (1 << 8);
2406 I915_WRITE(FW_BLC, fwater_lo);
2407 I915_WRITE(FW_BLC2, fwater_hi);
2410 intel_set_memory_cxsr(dev_priv, true);
2413 static void i845_update_wm(struct intel_crtc *unused_crtc)
2415 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2416 struct intel_crtc *crtc;
2417 const struct drm_display_mode *adjusted_mode;
2421 crtc = single_enabled_crtc(dev_priv);
2425 adjusted_mode = &crtc->config->base.adjusted_mode;
2426 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2428 dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
2429 4, pessimal_latency_ns);
2430 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2431 fwater_lo |= (3<<8) | planea_wm;
2433 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2435 I915_WRITE(FW_BLC, fwater_lo);
2438 /* latency must be in 0.1us units. */
2439 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2441 unsigned int latency)
2445 ret = intel_wm_method1(pixel_rate, cpp, latency);
2446 ret = DIV_ROUND_UP(ret, 64) + 2;
2451 /* latency must be in 0.1us units. */
2452 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2453 unsigned int htotal,
2456 unsigned int latency)
2460 ret = intel_wm_method2(pixel_rate, htotal,
2461 width, cpp, latency);
2462 ret = DIV_ROUND_UP(ret, 64) + 2;
2467 static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2470 * Neither of these should be possible since this function shouldn't be
2471 * called if the CRTC is off or the plane is invisible. But let's be
2472 * extra paranoid to avoid a potential divide-by-zero if we screw up
2473 * elsewhere in the driver.
2477 if (WARN_ON(!horiz_pixels))
2480 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2483 struct ilk_wm_maximums {
2491 * For both WM_PIPE and WM_LP.
2492 * mem_value must be in 0.1us units.
2494 static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2495 const struct intel_plane_state *pstate,
2496 u32 mem_value, bool is_lp)
2498 u32 method1, method2;
2504 if (!intel_wm_plane_visible(cstate, pstate))
2507 cpp = pstate->base.fb->format->cpp[0];
2509 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2514 method2 = ilk_wm_method2(cstate->pixel_rate,
2515 cstate->base.adjusted_mode.crtc_htotal,
2516 drm_rect_width(&pstate->base.dst),
2519 return min(method1, method2);
2523 * For both WM_PIPE and WM_LP.
2524 * mem_value must be in 0.1us units.
2526 static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2527 const struct intel_plane_state *pstate,
2530 u32 method1, method2;
2536 if (!intel_wm_plane_visible(cstate, pstate))
2539 cpp = pstate->base.fb->format->cpp[0];
2541 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2542 method2 = ilk_wm_method2(cstate->pixel_rate,
2543 cstate->base.adjusted_mode.crtc_htotal,
2544 drm_rect_width(&pstate->base.dst),
2546 return min(method1, method2);
2550 * For both WM_PIPE and WM_LP.
2551 * mem_value must be in 0.1us units.
2553 static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2554 const struct intel_plane_state *pstate,
2562 if (!intel_wm_plane_visible(cstate, pstate))
2565 cpp = pstate->base.fb->format->cpp[0];
2567 return ilk_wm_method2(cstate->pixel_rate,
2568 cstate->base.adjusted_mode.crtc_htotal,
2569 pstate->base.crtc_w, cpp, mem_value);
2572 /* Only for WM_LP. */
2573 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
2574 const struct intel_plane_state *pstate,
2579 if (!intel_wm_plane_visible(cstate, pstate))
2582 cpp = pstate->base.fb->format->cpp[0];
2584 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
2588 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2590 if (INTEL_GEN(dev_priv) >= 8)
2592 else if (INTEL_GEN(dev_priv) >= 7)
2599 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2600 int level, bool is_sprite)
2602 if (INTEL_GEN(dev_priv) >= 8)
2603 /* BDW primary/sprite plane watermarks */
2604 return level == 0 ? 255 : 2047;
2605 else if (INTEL_GEN(dev_priv) >= 7)
2606 /* IVB/HSW primary/sprite plane watermarks */
2607 return level == 0 ? 127 : 1023;
2608 else if (!is_sprite)
2609 /* ILK/SNB primary plane watermarks */
2610 return level == 0 ? 127 : 511;
2612 /* ILK/SNB sprite plane watermarks */
2613 return level == 0 ? 63 : 255;
2617 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2619 if (INTEL_GEN(dev_priv) >= 7)
2620 return level == 0 ? 63 : 255;
2622 return level == 0 ? 31 : 63;
2625 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2627 if (INTEL_GEN(dev_priv) >= 8)
2633 /* Calculate the maximum primary/sprite plane watermark */
2634 static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2636 const struct intel_wm_config *config,
2637 enum intel_ddb_partitioning ddb_partitioning,
2640 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2642 /* if sprites aren't enabled, sprites get nothing */
2643 if (is_sprite && !config->sprites_enabled)
2646 /* HSW allows LP1+ watermarks even with multiple pipes */
2647 if (level == 0 || config->num_pipes_active > 1) {
2648 fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
2651 * For some reason the non self refresh
2652 * FIFO size is only half of the self
2653 * refresh FIFO size on ILK/SNB.
2655 if (INTEL_GEN(dev_priv) <= 6)
2659 if (config->sprites_enabled) {
2660 /* level 0 is always calculated with 1:1 split */
2661 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2670 /* clamp to max that the registers can hold */
2671 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2674 /* Calculate the maximum cursor plane watermark */
2675 static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2677 const struct intel_wm_config *config)
2679 /* HSW LP1+ watermarks w/ multiple pipes */
2680 if (level > 0 && config->num_pipes_active > 1)
2683 /* otherwise just report max that registers can hold */
2684 return ilk_cursor_wm_reg_max(dev_priv, level);
2687 static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2689 const struct intel_wm_config *config,
2690 enum intel_ddb_partitioning ddb_partitioning,
2691 struct ilk_wm_maximums *max)
2693 max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2694 max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2695 max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2696 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2699 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2701 struct ilk_wm_maximums *max)
2703 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2704 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2705 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2706 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2709 static bool ilk_validate_wm_level(int level,
2710 const struct ilk_wm_maximums *max,
2711 struct intel_wm_level *result)
2715 /* already determined to be invalid? */
2716 if (!result->enable)
2719 result->enable = result->pri_val <= max->pri &&
2720 result->spr_val <= max->spr &&
2721 result->cur_val <= max->cur;
2723 ret = result->enable;
2726 * HACK until we can pre-compute everything,
2727 * and thus fail gracefully if LP0 watermarks
2730 if (level == 0 && !result->enable) {
2731 if (result->pri_val > max->pri)
2732 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2733 level, result->pri_val, max->pri);
2734 if (result->spr_val > max->spr)
2735 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2736 level, result->spr_val, max->spr);
2737 if (result->cur_val > max->cur)
2738 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2739 level, result->cur_val, max->cur);
2741 result->pri_val = min_t(u32, result->pri_val, max->pri);
2742 result->spr_val = min_t(u32, result->spr_val, max->spr);
2743 result->cur_val = min_t(u32, result->cur_val, max->cur);
2744 result->enable = true;
2750 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2751 const struct intel_crtc *intel_crtc,
2753 struct intel_crtc_state *cstate,
2754 const struct intel_plane_state *pristate,
2755 const struct intel_plane_state *sprstate,
2756 const struct intel_plane_state *curstate,
2757 struct intel_wm_level *result)
2759 u16 pri_latency = dev_priv->wm.pri_latency[level];
2760 u16 spr_latency = dev_priv->wm.spr_latency[level];
2761 u16 cur_latency = dev_priv->wm.cur_latency[level];
2763 /* WM1+ latency values stored in 0.5us units */
2771 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2772 pri_latency, level);
2773 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2777 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2780 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2782 result->enable = true;
2786 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2788 const struct intel_atomic_state *intel_state =
2789 to_intel_atomic_state(cstate->base.state);
2790 const struct drm_display_mode *adjusted_mode =
2791 &cstate->base.adjusted_mode;
2792 u32 linetime, ips_linetime;
2794 if (!cstate->base.active)
2796 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2798 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
2801 /* The WM are computed with base on how long it takes to fill a single
2802 * row at the given clock rate, multiplied by 8.
2804 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2805 adjusted_mode->crtc_clock);
2806 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2807 intel_state->cdclk.logical.cdclk);
2809 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2810 PIPE_WM_LINETIME_TIME(linetime);
2813 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2816 if (INTEL_GEN(dev_priv) >= 9) {
2819 int level, max_level = ilk_wm_max_level(dev_priv);
2821 /* read the first set of memory latencies[0:3] */
2822 val = 0; /* data0 to be programmed to 0 for first set */
2823 ret = sandybridge_pcode_read(dev_priv,
2824 GEN9_PCODE_READ_MEM_LATENCY,
2828 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2832 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2833 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2834 GEN9_MEM_LATENCY_LEVEL_MASK;
2835 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2836 GEN9_MEM_LATENCY_LEVEL_MASK;
2837 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2838 GEN9_MEM_LATENCY_LEVEL_MASK;
2840 /* read the second set of memory latencies[4:7] */
2841 val = 1; /* data0 to be programmed to 1 for second set */
2842 ret = sandybridge_pcode_read(dev_priv,
2843 GEN9_PCODE_READ_MEM_LATENCY,
2846 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2850 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2851 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2852 GEN9_MEM_LATENCY_LEVEL_MASK;
2853 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2854 GEN9_MEM_LATENCY_LEVEL_MASK;
2855 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2856 GEN9_MEM_LATENCY_LEVEL_MASK;
2859 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2860 * need to be disabled. We make sure to sanitize the values out
2861 * of the punit to satisfy this requirement.
2863 for (level = 1; level <= max_level; level++) {
2864 if (wm[level] == 0) {
2865 for (i = level + 1; i <= max_level; i++)
2872 * WaWmMemoryReadLatency:skl+,glk
2874 * punit doesn't take into account the read latency so we need
2875 * to add 2us to the various latency levels we retrieve from the
2876 * punit when level 0 response data us 0us.
2880 for (level = 1; level <= max_level; level++) {
2888 * WA Level-0 adjustment for 16GB DIMMs: SKL+
2889 * If we could not get dimm info enable this WA to prevent from
2890 * any underrun. If not able to get Dimm info assume 16GB dimm
2891 * to avoid any underrun.
2893 if (dev_priv->dram_info.is_16gb_dimm)
2896 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2897 u64 sskpd = I915_READ64(MCH_SSKPD);
2899 wm[0] = (sskpd >> 56) & 0xFF;
2901 wm[0] = sskpd & 0xF;
2902 wm[1] = (sskpd >> 4) & 0xFF;
2903 wm[2] = (sskpd >> 12) & 0xFF;
2904 wm[3] = (sskpd >> 20) & 0x1FF;
2905 wm[4] = (sskpd >> 32) & 0x1FF;
2906 } else if (INTEL_GEN(dev_priv) >= 6) {
2907 u32 sskpd = I915_READ(MCH_SSKPD);
2909 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2910 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2911 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2912 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2913 } else if (INTEL_GEN(dev_priv) >= 5) {
2914 u32 mltr = I915_READ(MLTR_ILK);
2916 /* ILK primary LP0 latency is 700 ns */
2918 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2919 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2921 MISSING_CASE(INTEL_DEVID(dev_priv));
2925 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2928 /* ILK sprite LP0 latency is 1300 ns */
2929 if (IS_GEN(dev_priv, 5))
2933 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2936 /* ILK cursor LP0 latency is 1300 ns */
2937 if (IS_GEN(dev_priv, 5))
2941 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2943 /* how many WM levels are we expecting */
2944 if (INTEL_GEN(dev_priv) >= 9)
2946 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2948 else if (INTEL_GEN(dev_priv) >= 6)
2954 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2958 int level, max_level = ilk_wm_max_level(dev_priv);
2960 for (level = 0; level <= max_level; level++) {
2961 unsigned int latency = wm[level];
2964 DRM_DEBUG_KMS("%s WM%d latency not provided\n",
2970 * - latencies are in us on gen9.
2971 * - before then, WM1+ latency values are in 0.5us units
2973 if (INTEL_GEN(dev_priv) >= 9)
2978 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2979 name, level, wm[level],
2980 latency / 10, latency % 10);
2984 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2987 int level, max_level = ilk_wm_max_level(dev_priv);
2992 wm[0] = max(wm[0], min);
2993 for (level = 1; level <= max_level; level++)
2994 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
2999 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3004 * The BIOS provided WM memory latency values are often
3005 * inadequate for high resolution displays. Adjust them.
3007 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
3008 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
3009 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3014 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
3015 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3016 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3017 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3020 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3023 * On some SNB machines (Thinkpad X220 Tablet at least)
3024 * LP3 usage can cause vblank interrupts to be lost.
3025 * The DEIIR bit will go high but it looks like the CPU
3026 * never gets interrupted.
3028 * It's not clear whether other interrupt source could
3029 * be affected or if this is somehow limited to vblank
3030 * interrupts only. To play it safe we disable LP3
3031 * watermarks entirely.
3033 if (dev_priv->wm.pri_latency[3] == 0 &&
3034 dev_priv->wm.spr_latency[3] == 0 &&
3035 dev_priv->wm.cur_latency[3] == 0)
3038 dev_priv->wm.pri_latency[3] = 0;
3039 dev_priv->wm.spr_latency[3] = 0;
3040 dev_priv->wm.cur_latency[3] = 0;
3042 DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
3043 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3044 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3045 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3048 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3050 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3052 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3053 sizeof(dev_priv->wm.pri_latency));
3054 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3055 sizeof(dev_priv->wm.pri_latency));
3057 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3058 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3060 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3061 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3062 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3064 if (IS_GEN(dev_priv, 6)) {
3065 snb_wm_latency_quirk(dev_priv);
3066 snb_wm_lp3_irq_quirk(dev_priv);
3070 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3072 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3073 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3076 static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3077 struct intel_pipe_wm *pipe_wm)
3079 /* LP0 watermark maximums depend on this pipe alone */
3080 const struct intel_wm_config config = {
3081 .num_pipes_active = 1,
3082 .sprites_enabled = pipe_wm->sprites_enabled,
3083 .sprites_scaled = pipe_wm->sprites_scaled,
3085 struct ilk_wm_maximums max;
3087 /* LP0 watermarks always use 1/2 DDB partitioning */
3088 ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3090 /* At least LP0 must be valid */
3091 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3092 DRM_DEBUG_KMS("LP0 watermark invalid\n");
3099 /* Compute new watermarks for the pipe */
3100 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
3102 struct drm_atomic_state *state = cstate->base.state;
3103 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3104 struct intel_pipe_wm *pipe_wm;
3105 struct drm_device *dev = state->dev;
3106 const struct drm_i915_private *dev_priv = to_i915(dev);
3107 struct drm_plane *plane;
3108 const struct drm_plane_state *plane_state;
3109 const struct intel_plane_state *pristate = NULL;
3110 const struct intel_plane_state *sprstate = NULL;
3111 const struct intel_plane_state *curstate = NULL;
3112 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3113 struct ilk_wm_maximums max;
3115 pipe_wm = &cstate->wm.ilk.optimal;
3117 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
3118 const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
3120 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
3122 else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
3124 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
3128 pipe_wm->pipe_enabled = cstate->base.active;
3130 pipe_wm->sprites_enabled = sprstate->base.visible;
3131 pipe_wm->sprites_scaled = sprstate->base.visible &&
3132 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
3133 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
3136 usable_level = max_level;
3138 /* ILK/SNB: LP2+ watermarks only w/o sprites */
3139 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3142 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3143 if (pipe_wm->sprites_scaled)
3146 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3147 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
3148 pristate, sprstate, curstate, &pipe_wm->wm[0]);
3150 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3151 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
3153 if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3156 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3158 for (level = 1; level <= usable_level; level++) {
3159 struct intel_wm_level *wm = &pipe_wm->wm[level];
3161 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
3162 pristate, sprstate, curstate, wm);
3165 * Disable any watermark level that exceeds the
3166 * register maximums since such watermarks are
3169 if (!ilk_validate_wm_level(level, &max, wm)) {
3170 memset(wm, 0, sizeof(*wm));
3179 * Build a set of 'intermediate' watermark values that satisfy both the old
3180 * state and the new state. These can be programmed to the hardware
3183 static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
3185 struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
3186 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3187 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3188 struct intel_atomic_state *intel_state =
3189 to_intel_atomic_state(newstate->base.state);
3190 const struct intel_crtc_state *oldstate =
3191 intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
3192 const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
3193 int level, max_level = ilk_wm_max_level(dev_priv);
3196 * Start with the final, target watermarks, then combine with the
3197 * currently active watermarks to get values that are safe both before
3198 * and after the vblank.
3200 *a = newstate->wm.ilk.optimal;
3201 if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
3202 intel_state->skip_intermediate_wm)
3205 a->pipe_enabled |= b->pipe_enabled;
3206 a->sprites_enabled |= b->sprites_enabled;
3207 a->sprites_scaled |= b->sprites_scaled;
3209 for (level = 0; level <= max_level; level++) {
3210 struct intel_wm_level *a_wm = &a->wm[level];
3211 const struct intel_wm_level *b_wm = &b->wm[level];
3213 a_wm->enable &= b_wm->enable;
3214 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3215 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3216 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3217 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3221 * We need to make sure that these merged watermark values are
3222 * actually a valid configuration themselves. If they're not,
3223 * there's no safe way to transition from the old state to
3224 * the new state, so we need to fail the atomic transaction.
3226 if (!ilk_validate_pipe_wm(dev_priv, a))
3230 * If our intermediate WM are identical to the final WM, then we can
3231 * omit the post-vblank programming; only update if it's different.
3233 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3234 newstate->wm.need_postvbl_update = true;
3240 * Merge the watermarks from all active pipes for a specific level.
3242 static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3244 struct intel_wm_level *ret_wm)
3246 const struct intel_crtc *intel_crtc;
3248 ret_wm->enable = true;
3250 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3251 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3252 const struct intel_wm_level *wm = &active->wm[level];
3254 if (!active->pipe_enabled)
3258 * The watermark values may have been used in the past,
3259 * so we must maintain them in the registers for some
3260 * time even if the level is now disabled.
3263 ret_wm->enable = false;
3265 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3266 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3267 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3268 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3273 * Merge all low power watermarks for all active pipes.
3275 static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3276 const struct intel_wm_config *config,
3277 const struct ilk_wm_maximums *max,
3278 struct intel_pipe_wm *merged)
3280 int level, max_level = ilk_wm_max_level(dev_priv);
3281 int last_enabled_level = max_level;
3283 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3284 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3285 config->num_pipes_active > 1)
3286 last_enabled_level = 0;
3288 /* ILK: FBC WM must be disabled always */
3289 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3291 /* merge each WM1+ level */
3292 for (level = 1; level <= max_level; level++) {
3293 struct intel_wm_level *wm = &merged->wm[level];
3295 ilk_merge_wm_level(dev_priv, level, wm);
3297 if (level > last_enabled_level)
3299 else if (!ilk_validate_wm_level(level, max, wm))
3300 /* make sure all following levels get disabled */
3301 last_enabled_level = level - 1;
3304 * The spec says it is preferred to disable
3305 * FBC WMs instead of disabling a WM level.
3307 if (wm->fbc_val > max->fbc) {
3309 merged->fbc_wm_enabled = false;
3314 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3316 * FIXME this is racy. FBC might get enabled later.
3317 * What we should check here is whether FBC can be
3318 * enabled sometime later.
3320 if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
3321 intel_fbc_is_active(dev_priv)) {
3322 for (level = 2; level <= max_level; level++) {
3323 struct intel_wm_level *wm = &merged->wm[level];
3330 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3332 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3333 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3336 /* The value we need to program into the WM_LPx latency field */
3337 static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3340 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3343 return dev_priv->wm.pri_latency[level];
3346 static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3347 const struct intel_pipe_wm *merged,
3348 enum intel_ddb_partitioning partitioning,
3349 struct ilk_wm_values *results)
3351 struct intel_crtc *intel_crtc;
3354 results->enable_fbc_wm = merged->fbc_wm_enabled;
3355 results->partitioning = partitioning;
3357 /* LP1+ register values */
3358 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3359 const struct intel_wm_level *r;
3361 level = ilk_wm_lp_to_level(wm_lp, merged);
3363 r = &merged->wm[level];
3366 * Maintain the watermark values even if the level is
3367 * disabled. Doing otherwise could cause underruns.
3369 results->wm_lp[wm_lp - 1] =
3370 (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3371 (r->pri_val << WM1_LP_SR_SHIFT) |
3375 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3377 if (INTEL_GEN(dev_priv) >= 8)
3378 results->wm_lp[wm_lp - 1] |=
3379 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3381 results->wm_lp[wm_lp - 1] |=
3382 r->fbc_val << WM1_LP_FBC_SHIFT;
3385 * Always set WM1S_LP_EN when spr_val != 0, even if the
3386 * level is disabled. Doing otherwise could cause underruns.
3388 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3389 WARN_ON(wm_lp != 1);
3390 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3392 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3395 /* LP0 register values */
3396 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3397 enum pipe pipe = intel_crtc->pipe;
3398 const struct intel_wm_level *r =
3399 &intel_crtc->wm.active.ilk.wm[0];
3401 if (WARN_ON(!r->enable))
3404 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3406 results->wm_pipe[pipe] =
3407 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3408 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3413 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
3414 * case both are at the same level. Prefer r1 in case they're the same. */
3415 static struct intel_pipe_wm *
3416 ilk_find_best_result(struct drm_i915_private *dev_priv,
3417 struct intel_pipe_wm *r1,
3418 struct intel_pipe_wm *r2)
3420 int level, max_level = ilk_wm_max_level(dev_priv);
3421 int level1 = 0, level2 = 0;
3423 for (level = 1; level <= max_level; level++) {
3424 if (r1->wm[level].enable)
3426 if (r2->wm[level].enable)
3430 if (level1 == level2) {
3431 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3435 } else if (level1 > level2) {
3442 /* dirty bits used to track which watermarks need changes */
3443 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3444 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
3445 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3446 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3447 #define WM_DIRTY_FBC (1 << 24)
3448 #define WM_DIRTY_DDB (1 << 25)
3450 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3451 const struct ilk_wm_values *old,
3452 const struct ilk_wm_values *new)
3454 unsigned int dirty = 0;
3458 for_each_pipe(dev_priv, pipe) {
3459 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
3460 dirty |= WM_DIRTY_LINETIME(pipe);
3461 /* Must disable LP1+ watermarks too */
3462 dirty |= WM_DIRTY_LP_ALL;
3465 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3466 dirty |= WM_DIRTY_PIPE(pipe);
3467 /* Must disable LP1+ watermarks too */
3468 dirty |= WM_DIRTY_LP_ALL;
3472 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3473 dirty |= WM_DIRTY_FBC;
3474 /* Must disable LP1+ watermarks too */
3475 dirty |= WM_DIRTY_LP_ALL;
3478 if (old->partitioning != new->partitioning) {
3479 dirty |= WM_DIRTY_DDB;
3480 /* Must disable LP1+ watermarks too */
3481 dirty |= WM_DIRTY_LP_ALL;
3484 /* LP1+ watermarks already deemed dirty, no need to continue */
3485 if (dirty & WM_DIRTY_LP_ALL)
3488 /* Find the lowest numbered LP1+ watermark in need of an update... */
3489 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3490 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3491 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3495 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3496 for (; wm_lp <= 3; wm_lp++)
3497 dirty |= WM_DIRTY_LP(wm_lp);
3502 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3505 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3506 bool changed = false;
3508 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3509 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3510 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3513 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3514 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3515 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3518 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3519 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3520 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3525 * Don't touch WM1S_LP_EN here.
3526 * Doing so could cause underruns.
3533 * The spec says we shouldn't write when we don't need, because every write
3534 * causes WMs to be re-evaluated, expending some power.
3536 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3537 struct ilk_wm_values *results)
3539 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3543 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3547 _ilk_disable_lp_wm(dev_priv, dirty);
3549 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3550 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3551 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3552 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3553 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3554 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3556 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
3557 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
3558 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
3559 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
3560 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
3561 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
3563 if (dirty & WM_DIRTY_DDB) {
3564 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3565 val = I915_READ(WM_MISC);
3566 if (results->partitioning == INTEL_DDB_PART_1_2)
3567 val &= ~WM_MISC_DATA_PARTITION_5_6;
3569 val |= WM_MISC_DATA_PARTITION_5_6;
3570 I915_WRITE(WM_MISC, val);
3572 val = I915_READ(DISP_ARB_CTL2);
3573 if (results->partitioning == INTEL_DDB_PART_1_2)
3574 val &= ~DISP_DATA_PARTITION_5_6;
3576 val |= DISP_DATA_PARTITION_5_6;
3577 I915_WRITE(DISP_ARB_CTL2, val);
3581 if (dirty & WM_DIRTY_FBC) {
3582 val = I915_READ(DISP_ARB_CTL);
3583 if (results->enable_fbc_wm)
3584 val &= ~DISP_FBC_WM_DIS;
3586 val |= DISP_FBC_WM_DIS;
3587 I915_WRITE(DISP_ARB_CTL, val);
3590 if (dirty & WM_DIRTY_LP(1) &&
3591 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3592 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3594 if (INTEL_GEN(dev_priv) >= 7) {
3595 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3596 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3597 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3598 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3601 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3602 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3603 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3604 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3605 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3606 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3608 dev_priv->wm.hw = *results;
3611 bool ilk_disable_lp_wm(struct drm_device *dev)
3613 struct drm_i915_private *dev_priv = to_i915(dev);
3615 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3618 static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
3622 /* Slice 1 will always be enabled */
3625 /* Gen prior to GEN11 have only one DBuf slice */
3626 if (INTEL_GEN(dev_priv) < 11)
3627 return enabled_slices;
3630 * FIXME: for now we'll only ever use 1 slice; pretend that we have
3631 * only that 1 slice enabled until we have a proper way for on-demand
3632 * toggling of the second slice.
3634 if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
3637 return enabled_slices;
3641 * FIXME: We still don't have the proper code detect if we need to apply the WA,
3642 * so assume we'll always need it in order to avoid underruns.
3644 static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3646 return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
3650 intel_has_sagv(struct drm_i915_private *dev_priv)
3652 return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3653 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3657 * SAGV dynamically adjusts the system agent voltage and clock frequencies
3658 * depending on power and performance requirements. The display engine access
3659 * to system memory is blocked during the adjustment time. Because of the
3660 * blocking time, having this enabled can cause full system hangs and/or pipe
3661 * underruns if we don't meet all of the following requirements:
3663 * - <= 1 pipe enabled
3664 * - All planes can enable watermarks for latencies >= SAGV engine block time
3665 * - We're not using an interlaced display configuration
3668 intel_enable_sagv(struct drm_i915_private *dev_priv)
3672 if (!intel_has_sagv(dev_priv))
3675 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3678 DRM_DEBUG_KMS("Enabling SAGV\n");
3679 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3682 /* We don't need to wait for SAGV when enabling */
3685 * Some skl systems, pre-release machines in particular,
3686 * don't actually have SAGV.
3688 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3689 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3690 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3692 } else if (ret < 0) {
3693 DRM_ERROR("Failed to enable SAGV\n");
3697 dev_priv->sagv_status = I915_SAGV_ENABLED;
3702 intel_disable_sagv(struct drm_i915_private *dev_priv)
3706 if (!intel_has_sagv(dev_priv))
3709 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3712 DRM_DEBUG_KMS("Disabling SAGV\n");
3713 /* bspec says to keep retrying for at least 1 ms */
3714 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3716 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3719 * Some skl systems, pre-release machines in particular,
3720 * don't actually have SAGV.
3722 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3723 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3724 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3726 } else if (ret < 0) {
3727 DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
3731 dev_priv->sagv_status = I915_SAGV_DISABLED;
3735 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3737 struct drm_device *dev = state->dev;
3738 struct drm_i915_private *dev_priv = to_i915(dev);
3739 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3740 struct intel_crtc *crtc;
3741 struct intel_plane *plane;
3742 struct intel_crtc_state *cstate;
3745 int sagv_block_time_us;
3747 if (!intel_has_sagv(dev_priv))
3750 if (IS_GEN(dev_priv, 9))
3751 sagv_block_time_us = 30;
3752 else if (IS_GEN(dev_priv, 10))
3753 sagv_block_time_us = 20;
3755 sagv_block_time_us = 10;
3758 * If there are no active CRTCs, no additional checks need be performed
3760 if (hweight32(intel_state->active_crtcs) == 0)
3764 * SKL+ workaround: bspec recommends we disable SAGV when we have
3765 * more then one pipe enabled
3767 if (hweight32(intel_state->active_crtcs) > 1)
3770 /* Since we're now guaranteed to only have one active CRTC... */
3771 pipe = ffs(intel_state->active_crtcs) - 1;
3772 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3773 cstate = to_intel_crtc_state(crtc->base.state);
3775 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3778 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3779 struct skl_plane_wm *wm =
3780 &cstate->wm.skl.optimal.planes[plane->id];
3782 /* Skip this plane if it's not enabled */
3783 if (!wm->wm[0].plane_en)
3786 /* Find the highest enabled wm level for this plane */
3787 for (level = ilk_wm_max_level(dev_priv);
3788 !wm->wm[level].plane_en; --level)
3791 latency = dev_priv->wm.skl_latency[level];
3793 if (skl_needs_memory_bw_wa(dev_priv) &&
3794 plane->base.state->fb->modifier ==
3795 I915_FORMAT_MOD_X_TILED)
3799 * If any of the planes on this pipe don't enable wm levels that
3800 * incur memory latencies higher than sagv_block_time_us we
3801 * can't enable SAGV.
3803 if (latency < sagv_block_time_us)
3810 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3811 const struct intel_crtc_state *cstate,
3812 const u64 total_data_rate,
3813 const int num_active,
3814 struct skl_ddb_allocation *ddb)
3816 const struct drm_display_mode *adjusted_mode;
3818 u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3820 WARN_ON(ddb_size == 0);
3822 if (INTEL_GEN(dev_priv) < 11)
3823 return ddb_size - 4; /* 4 blocks for bypass path allocation */
3825 adjusted_mode = &cstate->base.adjusted_mode;
3826 total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
3829 * 12GB/s is maximum BW supported by single DBuf slice.
3831 * FIXME dbuf slice code is broken:
3832 * - must wait for planes to stop using the slice before powering it off
3833 * - plane straddling both slices is illegal in multi-pipe scenarios
3834 * - should validate we stay within the hw bandwidth limits
3836 if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
3837 ddb->enabled_slices = 2;
3839 ddb->enabled_slices = 1;
3847 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
3848 const struct intel_crtc_state *cstate,
3849 const u64 total_data_rate,
3850 struct skl_ddb_allocation *ddb,
3851 struct skl_ddb_entry *alloc, /* out */
3852 int *num_active /* out */)
3854 struct drm_atomic_state *state = cstate->base.state;
3855 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3856 struct drm_crtc *for_crtc = cstate->base.crtc;
3857 const struct drm_crtc_state *crtc_state;
3858 const struct drm_crtc *crtc;
3859 u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
3860 enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
3864 if (WARN_ON(!state) || !cstate->base.active) {
3867 *num_active = hweight32(dev_priv->active_crtcs);
3871 if (intel_state->active_pipe_changes)
3872 *num_active = hweight32(intel_state->active_crtcs);
3874 *num_active = hweight32(dev_priv->active_crtcs);
3876 ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
3880 * If the state doesn't change the active CRTC's or there is no
3881 * modeset request, then there's no need to recalculate;
3882 * the existing pipe allocation limits should remain unchanged.
3883 * Note that we're safe from racing commits since any racing commit
3884 * that changes the active CRTC list or do modeset would need to
3885 * grab _all_ crtc locks, including the one we currently hold.
3887 if (!intel_state->active_pipe_changes && !intel_state->modeset) {
3889 * alloc may be cleared by clear_intel_crtc_state,
3890 * copy from old state to be sure
3892 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3897 * Watermark/ddb requirement highly depends upon width of the
3898 * framebuffer, So instead of allocating DDB equally among pipes
3899 * distribute DDB based on resolution/width of the display.
3901 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
3902 const struct drm_display_mode *adjusted_mode;
3903 int hdisplay, vdisplay;
3906 if (!crtc_state->enable)
3909 pipe = to_intel_crtc(crtc)->pipe;
3910 adjusted_mode = &crtc_state->adjusted_mode;
3911 drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
3912 total_width += hdisplay;
3914 if (pipe < for_pipe)
3915 width_before_pipe += hdisplay;
3916 else if (pipe == for_pipe)
3917 pipe_width = hdisplay;
3920 alloc->start = ddb_size * width_before_pipe / total_width;
3921 alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
3924 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
3925 int width, const struct drm_format_info *format,
3926 u64 modifier, unsigned int rotation,
3927 u32 plane_pixel_rate, struct skl_wm_params *wp,
3929 static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
3931 const struct skl_wm_params *wp,
3932 const struct skl_wm_level *result_prev,
3933 struct skl_wm_level *result /* out */);
3936 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
3939 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3940 int level, max_level = ilk_wm_max_level(dev_priv);
3941 struct skl_wm_level wm = {};
3942 int ret, min_ddb_alloc = 0;
3943 struct skl_wm_params wp;
3945 ret = skl_compute_wm_params(crtc_state, 256,
3946 drm_format_info(DRM_FORMAT_ARGB8888),
3947 DRM_FORMAT_MOD_LINEAR,
3949 crtc_state->pixel_rate, &wp, 0);
3952 for (level = 0; level <= max_level; level++) {
3953 skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
3954 if (wm.min_ddb_alloc == U16_MAX)
3957 min_ddb_alloc = wm.min_ddb_alloc;
3960 return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
3963 static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
3964 struct skl_ddb_entry *entry, u32 reg)
3967 entry->start = reg & DDB_ENTRY_MASK;
3968 entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
3975 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
3976 const enum pipe pipe,
3977 const enum plane_id plane_id,
3978 struct skl_ddb_entry *ddb_y,
3979 struct skl_ddb_entry *ddb_uv)
3984 /* Cursor doesn't support NV12/planar, so no extra calculation needed */
3985 if (plane_id == PLANE_CURSOR) {
3986 val = I915_READ(CUR_BUF_CFG(pipe));
3987 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
3991 val = I915_READ(PLANE_CTL(pipe, plane_id));
3993 /* No DDB allocated for disabled planes */
3994 if (val & PLANE_CTL_ENABLE)
3995 fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
3996 val & PLANE_CTL_ORDER_RGBX,
3997 val & PLANE_CTL_ALPHA_MASK);
3999 if (INTEL_GEN(dev_priv) >= 11) {
4000 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4001 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4003 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4004 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
4006 if (is_planar_yuv_format(fourcc))
4009 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4010 skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
4014 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
4015 struct skl_ddb_entry *ddb_y,
4016 struct skl_ddb_entry *ddb_uv)
4018 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4019 enum intel_display_power_domain power_domain;
4020 enum pipe pipe = crtc->pipe;
4021 intel_wakeref_t wakeref;
4022 enum plane_id plane_id;
4024 power_domain = POWER_DOMAIN_PIPE(pipe);
4025 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4029 for_each_plane_id_on_crtc(crtc, plane_id)
4030 skl_ddb_get_hw_plane_state(dev_priv, pipe,
4035 intel_display_power_put(dev_priv, power_domain, wakeref);
4038 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
4039 struct skl_ddb_allocation *ddb /* out */)
4041 ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
4045 * Determines the downscale amount of a plane for the purposes of watermark calculations.
4046 * The bspec defines downscale amount as:
4049 * Horizontal down scale amount = maximum[1, Horizontal source size /
4050 * Horizontal destination size]
4051 * Vertical down scale amount = maximum[1, Vertical source size /
4052 * Vertical destination size]
4053 * Total down scale amount = Horizontal down scale amount *
4054 * Vertical down scale amount
4057 * Return value is provided in 16.16 fixed point form to retain fractional part.
4058 * Caller should take care of dividing & rounding off the value.
4060 static uint_fixed_16_16_t
4061 skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
4062 const struct intel_plane_state *pstate)
4064 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
4065 u32 src_w, src_h, dst_w, dst_h;
4066 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4067 uint_fixed_16_16_t downscale_h, downscale_w;
4069 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
4070 return u32_to_fixed16(0);
4072 /* n.b., src is 16.16 fixed point, dst is whole integer */
4073 if (plane->id == PLANE_CURSOR) {
4074 /*
4075 * Cursors only support 0/180 degree rotation,
4076 * hence no need to account for rotation here.
4077 */
4078 src_w = pstate->base.src_w >> 16;
4079 src_h = pstate->base.src_h >> 16;
4080 dst_w = pstate->base.crtc_w;
4081 dst_h = pstate->base.crtc_h;
4082 } else {
4083 /*
4084 * Src coordinates are already rotated by 270 degrees for
4085 * the 90/270 degree plane rotation cases (to match the
4086 * GTT mapping), hence no need to account for rotation here.
4087 */
4088 src_w = drm_rect_width(&pstate->base.src) >> 16;
4089 src_h = drm_rect_height(&pstate->base.src) >> 16;
4090 dst_w = drm_rect_width(&pstate->base.dst);
4091 dst_h = drm_rect_height(&pstate->base.dst);
4092 }
4094 fp_w_ratio = div_fixed16(src_w, dst_w);
4095 fp_h_ratio = div_fixed16(src_h, dst_h);
4096 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4097 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4099 return mul_fixed16(downscale_w, downscale_h);
4100 }
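/*
* Illustrative sketch (hypothetical helper, not part of the driver): a
* worked instance of the 16.16 fixed point math above, for a 3840x2160
* source scaled down to a 1920x1080 destination.
*/
static uint_fixed_16_16_t skl_example_downscale_amount(void)
{
/* 3840/1920 == 2.0 and 2160/1080 == 2.0, i.e. 0x20000 in 16.16 form */
uint_fixed_16_16_t fp_w_ratio = div_fixed16(3840, 1920);
uint_fixed_16_16_t fp_h_ratio = div_fixed16(2160, 1080);

/* 2.0 * 2.0 == 4.0, i.e. 0x40000 in 16.16 form */
return mul_fixed16(max_fixed16(fp_w_ratio, u32_to_fixed16(1)),
max_fixed16(fp_h_ratio, u32_to_fixed16(1)));
}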
4102 static uint_fixed_16_16_t
4103 skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
4105 uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
4107 if (!crtc_state->base.enable)
4108 return pipe_downscale;
4110 if (crtc_state->pch_pfit.enabled) {
4111 u32 src_w, src_h, dst_w, dst_h;
4112 u32 pfit_size = crtc_state->pch_pfit.size;
4113 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4114 uint_fixed_16_16_t downscale_h, downscale_w;
4116 src_w = crtc_state->pipe_src_w;
4117 src_h = crtc_state->pipe_src_h;
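/*
* Note (assumption from the decode below): pch_pfit.size packs the
* panel fitter window as (width << 16) | height.
*/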
4118 dst_w = pfit_size >> 16;
4119 dst_h = pfit_size & 0xffff;
4121 if (!dst_w || !dst_h)
4122 return pipe_downscale;
4124 fp_w_ratio = div_fixed16(src_w, dst_w);
4125 fp_h_ratio = div_fixed16(src_h, dst_h);
4126 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4127 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4129 pipe_downscale = mul_fixed16(downscale_w, downscale_h);
4132 return pipe_downscale;
4133 }
4135 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
4136 struct intel_crtc_state *cstate)
4138 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4139 struct drm_crtc_state *crtc_state = &cstate->base;
4140 struct drm_atomic_state *state = crtc_state->state;
4141 struct drm_plane *plane;
4142 const struct drm_plane_state *pstate;
4143 struct intel_plane_state *intel_pstate;
4144 int crtc_clock, dotclk;
4145 u32 pipe_max_pixel_rate;
4146 uint_fixed_16_16_t pipe_downscale;
4147 uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
4149 if (!cstate->base.enable)
4150 return 0;
4152 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
4153 uint_fixed_16_16_t plane_downscale;
4154 uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
4155 int bpp;
4157 if (!intel_wm_plane_visible(cstate,
4158 to_intel_plane_state(pstate)))
4159 continue;
4161 if (WARN_ON(!pstate->fb))
4162 return -EINVAL;
4164 intel_pstate = to_intel_plane_state(pstate);
4165 plane_downscale = skl_plane_downscale_amount(cstate,
4166 intel_pstate);
4167 bpp = pstate->fb->format->cpp[0] * 8;
4168 if (bpp == 64)
4169 plane_downscale = mul_fixed16(plane_downscale,
4170 fp_9_div_8);
4172 max_downscale = max_fixed16(plane_downscale, max_downscale);
4173 }
4174 pipe_downscale = skl_pipe_downscale_amount(cstate);
4176 pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
4178 crtc_clock = crtc_state->adjusted_mode.crtc_clock;
4179 dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
4181 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
4182 dotclk *= 2;
4184 pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
4186 if (pipe_max_pixel_rate < crtc_clock) {
4187 DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
4188 return -EINVAL;
4189 }
4191 return 0;
4192 }
4195 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4196 const struct intel_plane_state *intel_pstate,
4197 const int plane)
4198 {
4199 struct intel_plane *intel_plane =
4200 to_intel_plane(intel_pstate->base.plane);
4202 u32 width = 0, height = 0;
4203 struct drm_framebuffer *fb;
4205 uint_fixed_16_16_t down_scale_amount;
4208 if (!intel_pstate->base.visible)
4209 return 0;
4211 fb = intel_pstate->base.fb;
4212 format = fb->format->format;
4214 if (intel_plane->id == PLANE_CURSOR)
4215 return 0;
4216 if (plane == 1 && !is_planar_yuv_format(format))
4217 return 0;
4219 /*
4220 * Src coordinates are already rotated by 270 degrees for
4221 * the 90/270 degree plane rotation cases (to match the
4222 * GTT mapping), hence no need to account for rotation here.
4223 */
4224 width = drm_rect_width(&intel_pstate->base.src) >> 16;
4225 height = drm_rect_height(&intel_pstate->base.src) >> 16;
4227 /* UV plane does 1/2 pixel sub-sampling */
4228 if (plane == 1 && is_planar_yuv_format(format)) {
4229 width /= 2;
4230 height /= 2;
4231 }
4233 data_rate = width * height;
4235 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
4237 rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4239 rate *= fb->format->cpp[plane];
4240 return rate;
4241 }
4244 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4245 u64 *plane_data_rate,
4246 u64 *uv_plane_data_rate)
4248 struct drm_crtc_state *cstate = &intel_cstate->base;
4249 struct drm_atomic_state *state = cstate->state;
4250 struct drm_plane *plane;
4251 const struct drm_plane_state *pstate;
4252 u64 total_data_rate = 0;
4254 if (WARN_ON(!state))
4255 return 0;
4257 /* Calculate and cache data rate for each plane */
4258 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4259 enum plane_id plane_id = to_intel_plane(plane)->id;
4261 const struct intel_plane_state *intel_pstate =
4262 to_intel_plane_state(pstate);
4264 /* packed/y */
4265 rate = skl_plane_relative_data_rate(intel_cstate,
4266 intel_pstate, 0);
4267 plane_data_rate[plane_id] = rate;
4268 total_data_rate += rate;
4270 /* uv-plane */
4271 rate = skl_plane_relative_data_rate(intel_cstate,
4272 intel_pstate, 1);
4273 uv_plane_data_rate[plane_id] = rate;
4274 total_data_rate += rate;
4275 }
4277 return total_data_rate;
4278 }
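/*
* Illustrative sketch (hypothetical helper, not part of the driver):
* for an unscaled, fully visible 1920x1080 ARGB8888 plane the relative
* data rate computed above reduces to width * height * cpp, i.e.
* 1920 * 1080 * 4 == 8294400.
*/
static u64 skl_example_plane_data_rate(void)
{
uint_fixed_16_16_t down_scale_amount = u32_to_fixed16(1); /* 1:1 */
u32 data_rate = 1920 * 1080;

return mul_round_up_u32_fixed16(data_rate, down_scale_amount) * 4;
}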
4280 static u64
4281 icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4282 u64 *plane_data_rate)
4284 struct drm_crtc_state *cstate = &intel_cstate->base;
4285 struct drm_atomic_state *state = cstate->state;
4286 struct drm_plane *plane;
4287 const struct drm_plane_state *pstate;
4288 u64 total_data_rate = 0;
4290 if (WARN_ON(!state))
4291 return 0;
4293 /* Calculate and cache data rate for each plane */
4294 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4295 const struct intel_plane_state *intel_pstate =
4296 to_intel_plane_state(pstate);
4297 enum plane_id plane_id = to_intel_plane(plane)->id;
4300 if (!intel_pstate->linked_plane) {
4301 rate = skl_plane_relative_data_rate(intel_cstate,
4302 intel_pstate, 0);
4303 plane_data_rate[plane_id] = rate;
4304 total_data_rate += rate;
4305 } else {
4306 enum plane_id y_plane_id;
4308 /*
4309 * The slave plane might not be iterated by
4310 * drm_atomic_crtc_state_for_each_plane_state(),
4311 * and it needs the master plane state, which may be
4312 * NULL if we tried get_new_plane_state(); so we
4313 * always calculate from the master.
4314 */
4315 if (intel_pstate->slave)
4316 continue;
4318 /* Y plane rate is calculated on the slave */
4319 rate = skl_plane_relative_data_rate(intel_cstate,
4320 intel_pstate, 0);
4321 y_plane_id = intel_pstate->linked_plane->id;
4322 plane_data_rate[y_plane_id] = rate;
4323 total_data_rate += rate;
4325 rate = skl_plane_relative_data_rate(intel_cstate,
4326 intel_pstate, 1);
4327 plane_data_rate[plane_id] = rate;
4328 total_data_rate += rate;
4329 }
4330 }
4332 return total_data_rate;
4333 }
4335 static int
4336 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4337 struct skl_ddb_allocation *ddb /* out */)
4339 struct drm_atomic_state *state = cstate->base.state;
4340 struct drm_crtc *crtc = cstate->base.crtc;
4341 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4342 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4343 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4344 u16 alloc_size, start = 0;
4345 u16 total[I915_MAX_PLANES] = {};
4346 u16 uv_total[I915_MAX_PLANES] = {};
4347 u64 total_data_rate;
4348 enum plane_id plane_id;
4349 int num_active;
4350 u64 plane_data_rate[I915_MAX_PLANES] = {};
4351 u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4352 u32 blocks;
4353 int level;
4355 /* Clear the partitioning for disabled planes. */
4356 memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
4357 memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
4359 if (WARN_ON(!state))
4360 return 0;
4362 if (!cstate->base.active) {
4363 alloc->start = alloc->end = 0;
4364 return 0;
4365 }
4367 if (INTEL_GEN(dev_priv) >= 11)
4368 total_data_rate =
4369 icl_get_total_relative_data_rate(cstate,
4370 plane_data_rate);
4371 else
4372 total_data_rate =
4373 skl_get_total_relative_data_rate(cstate,
4374 plane_data_rate,
4375 uv_plane_data_rate);
4378 skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
4379 ddb, alloc, &num_active);
4380 alloc_size = skl_ddb_entry_size(alloc);
4381 if (alloc_size == 0)
4382 return 0;
4384 /* Allocate fixed number of blocks for cursor. */
4385 total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
4386 alloc_size -= total[PLANE_CURSOR];
4387 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
4388 alloc->end - total[PLANE_CURSOR];
4389 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4391 if (total_data_rate == 0)
4392 return 0;
4395 * Find the highest watermark level for which we can satisfy the block
4396 * requirement of active planes.
4398 for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
4399 blocks = 0;
4400 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4401 const struct skl_plane_wm *wm =
4402 &cstate->wm.skl.optimal.planes[plane_id];
4404 if (plane_id == PLANE_CURSOR) {
4405 if (WARN_ON(wm->wm[level].min_ddb_alloc >
4406 total[PLANE_CURSOR])) {
4407 blocks = U32_MAX;
4408 break;
4409 }
4410 continue;
4411 }
4413 blocks += wm->wm[level].min_ddb_alloc;
4414 blocks += wm->uv_wm[level].min_ddb_alloc;
4415 }
4417 if (blocks <= alloc_size) {
4418 alloc_size -= blocks;
4419 break;
4420 }
4421 }
4423 if (level < 0) {
4424 DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations\n");
4425 DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
4426 alloc_size);
4427 return -EINVAL;
4428 }
4430 /*
4431 * Grant each plane the blocks it requires at the highest achievable
4432 * watermark level, plus an extra share of the leftover blocks
4433 * proportional to its relative data rate.
4434 */
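/*
* Worked example (hypothetical numbers): with alloc_size == 100
* leftover blocks and two planes carrying 3/4 and 1/4 of the remaining
* data rate, the first plane receives
* DIV64_U64_ROUND_UP(100 * 75, 100) == 75 extra blocks and the second
* receives the remaining 25.
*/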
4435 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4436 const struct skl_plane_wm *wm =
4437 &cstate->wm.skl.optimal.planes[plane_id];
4441 if (plane_id == PLANE_CURSOR)
4442 continue;
4444 /*
4445 * We've accounted for all active planes; remaining planes are
4446 * all disabled.
4447 */
4448 if (total_data_rate == 0)
4449 break;
4451 rate = plane_data_rate[plane_id];
4452 extra = min_t(u16, alloc_size,
4453 DIV64_U64_ROUND_UP(alloc_size * rate,
4455 total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
4456 alloc_size -= extra;
4457 total_data_rate -= rate;
4459 if (total_data_rate == 0)
4462 rate = uv_plane_data_rate[plane_id];
4463 extra = min_t(u16, alloc_size,
4464 DIV64_U64_ROUND_UP(alloc_size * rate,
4466 uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
4467 alloc_size -= extra;
4468 total_data_rate -= rate;
4470 WARN_ON(alloc_size != 0 || total_data_rate != 0);
4472 /* Set the actual DDB start/end points for each plane */
4473 start = alloc->start;
4474 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4475 struct skl_ddb_entry *plane_alloc =
4476 &cstate->wm.skl.plane_ddb_y[plane_id];
4477 struct skl_ddb_entry *uv_plane_alloc =
4478 &cstate->wm.skl.plane_ddb_uv[plane_id];
4480 if (plane_id == PLANE_CURSOR)
4481 continue;
4483 /* Gen11+ uses a separate plane for UV watermarks */
4484 WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
4486 /* Leave disabled planes at (0,0) */
4487 if (total[plane_id]) {
4488 plane_alloc->start = start;
4489 start += total[plane_id];
4490 plane_alloc->end = start;
4493 if (uv_total[plane_id]) {
4494 uv_plane_alloc->start = start;
4495 start += uv_total[plane_id];
4496 uv_plane_alloc->end = start;
4501 * When we calculated watermark values we didn't know how high
4502 * of a level we'd actually be able to hit, so we just marked
4503 * all levels as "enabled." Go back now and disable the ones
4504 * that aren't actually possible.
4505 */
4506 for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
4507 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4508 struct skl_plane_wm *wm =
4509 &cstate->wm.skl.optimal.planes[plane_id];
4512 * We only disable the watermarks for each plane if
4513 * they exceed the ddb allocation of said plane. This
4514 * is done so that we don't end up touching cursor
4515 * watermarks needlessly when some other plane reduces
4516 * our max possible watermark level.
4518 * Bspec has this to say about the PLANE_WM enable bit:
4519 * "All the watermarks at this level for all enabled
4520 * planes must be enabled before the level will be used."
4521 * So this is actually safe to do.
4522 */
4523 if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
4524 wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
4525 memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
4528 * Wa_1408961008:icl, ehl
4529 * Underruns with WM1+ disabled
4531 if (IS_GEN(dev_priv, 11) &&
4532 level == 1 && wm->wm[0].plane_en) {
4533 wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
4534 wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
4535 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
4541 * Go back and disable the transition watermark if it turns out we
4542 * don't have enough DDB blocks for it.
4544 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4545 struct skl_plane_wm *wm =
4546 &cstate->wm.skl.optimal.planes[plane_id];
4548 if (wm->trans_wm.plane_res_b >= total[plane_id])
4549 memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
4556 * The max latency should be 257 (max the punit can code is 255 and we add 2us
4557 * for the read latency) and cpp should always be <= 8, so that
4558 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
4559 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
4560 */
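/*
* Worked check of the bound above: 257 us * 2,000,000 kHz * 8 bytes is
* roughly 4.1e9, which still fits the u32 intermediate value below.
*/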
4561 static uint_fixed_16_16_t
4562 skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
4563 u8 cpp, u32 latency, u32 dbuf_block_size)
4565 u32 wm_intermediate_val;
4566 uint_fixed_16_16_t ret;
4568 if (latency == 0)
4569 return FP_16_16_MAX;
4571 wm_intermediate_val = latency * pixel_rate * cpp;
4572 ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
4574 if (INTEL_GEN(dev_priv) >= 10)
4575 ret = add_fixed16_u32(ret, 1);
4577 return ret;
4578 }
4580 static uint_fixed_16_16_t
4581 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
4582 uint_fixed_16_16_t plane_blocks_per_line)
4584 u32 wm_intermediate_val;
4585 uint_fixed_16_16_t ret;
4587 if (latency == 0)
4588 return FP_16_16_MAX;
4590 wm_intermediate_val = latency * pixel_rate;
4591 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
4592 pipe_htotal * 1000);
4593 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
4595 return ret;
4596 }
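/*
* Worked example for skl_wm_method2() above (hypothetical numbers): a
* 100000 kHz pixel clock with htotal 2000 has a 20 us line time, so a
* 10 us latency covers DIV_ROUND_UP(10 * 100000, 2000 * 1000) == 1
* line worth of plane_blocks_per_line.
*/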
4597 static uint_fixed_16_16_t
4598 intel_get_linetime_us(const struct intel_crtc_state *cstate)
4599 {
4600 u32 pixel_rate;
4601 u32 crtc_htotal;
4602 uint_fixed_16_16_t linetime_us;
4604 if (!cstate->base.active)
4605 return u32_to_fixed16(0);
4607 pixel_rate = cstate->pixel_rate;
4609 if (WARN_ON(pixel_rate == 0))
4610 return u32_to_fixed16(0);
4612 crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
4613 linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
4615 return linetime_us;
4616 }
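/*
* Worked example for intel_get_linetime_us() above: a typical 1080p
* timing with crtc_htotal 2200 and a 148500 kHz pixel rate yields
* 2200 * 1000 / 148500, roughly 14.8 us per line.
*/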
4618 static u32
4619 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
4620 const struct intel_plane_state *pstate)
4622 u64 adjusted_pixel_rate;
4623 uint_fixed_16_16_t downscale_amount;
4625 /* Shouldn't reach here on disabled planes... */
4626 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
4630 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
4631 * with additional adjustments for plane-specific scaling.
4633 adjusted_pixel_rate = cstate->pixel_rate;
4634 downscale_amount = skl_plane_downscale_amount(cstate, pstate);
4636 return mul_round_up_u32_fixed16(adjusted_pixel_rate,
4641 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4642 int width, const struct drm_format_info *format,
4643 u64 modifier, unsigned int rotation,
4644 u32 plane_pixel_rate, struct skl_wm_params *wp,
4647 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4648 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4651 /* only planar formats have two planes */
4652 if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
4653 DRM_DEBUG_KMS("Non-planar format has a single plane\n");
4654 return -EINVAL;
4655 }
4657 wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
4658 modifier == I915_FORMAT_MOD_Yf_TILED ||
4659 modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4660 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4661 wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
4662 wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4663 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4664 wp->is_planar = is_planar_yuv_format(format->format);
4667 if (color_plane == 1 && wp->is_planar)
4670 wp->cpp = format->cpp[color_plane];
4671 wp->plane_pixel_rate = plane_pixel_rate;
4673 if (INTEL_GEN(dev_priv) >= 11 &&
4674 modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
4675 wp->dbuf_block_size = 256;
4677 wp->dbuf_block_size = 512;
4679 if (drm_rotation_90_or_270(rotation)) {
4682 wp->y_min_scanlines = 16;
4685 wp->y_min_scanlines = 8;
4688 wp->y_min_scanlines = 4;
4691 MISSING_CASE(wp->cpp);
4695 wp->y_min_scanlines = 4;
4698 if (skl_needs_memory_bw_wa(dev_priv))
4699 wp->y_min_scanlines *= 2;
4701 wp->plane_bytes_per_line = wp->width * wp->cpp;
4703 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
4704 wp->y_min_scanlines,
4705 wp->dbuf_block_size);
4707 if (INTEL_GEN(dev_priv) >= 10)
4710 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
4711 wp->y_min_scanlines);
4712 } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
4713 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4714 wp->dbuf_block_size);
4715 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4717 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4718 wp->dbuf_block_size) + 1;
4719 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4722 wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
4723 wp->plane_blocks_per_line);
4725 wp->linetime_us = fixed16_to_u32_round_up(
4726 intel_get_linetime_us(crtc_state));
4732 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
4733 const struct intel_plane_state *plane_state,
4734 struct skl_wm_params *wp, int color_plane)
4736 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4737 const struct drm_framebuffer *fb = plane_state->base.fb;
4740 if (plane->id == PLANE_CURSOR) {
4741 width = plane_state->base.crtc_w;
4744 * Src coordinates are already rotated by 270 degrees for
4745 * the 90/270 degree plane rotation cases (to match the
4746 * GTT mapping), hence no need to account for rotation here.
4748 width = drm_rect_width(&plane_state->base.src) >> 16;
4751 return skl_compute_wm_params(crtc_state, width,
4752 fb->format, fb->modifier,
4753 plane_state->base.rotation,
4754 skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
4758 static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
4760 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4761 return true;
4763 /* The number of lines is ignored for the level 0 watermark. */
4764 return level > 0;
4765 }
4767 static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
4768 int level,
4769 const struct skl_wm_params *wp,
4770 const struct skl_wm_level *result_prev,
4771 struct skl_wm_level *result /* out */)
4773 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4774 u32 latency = dev_priv->wm.skl_latency[level];
4775 uint_fixed_16_16_t method1, method2;
4776 uint_fixed_16_16_t selected_result;
4777 u32 res_blocks, res_lines, min_ddb_alloc = 0;
4781 result->min_ddb_alloc = U16_MAX;
4786 * WaIncreaseLatencyIPCEnabled: kbl,cfl
4787 * Display WA #1141: kbl,cfl
4789 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
4790 dev_priv->ipc_enabled)
4793 if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
4796 method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
4797 wp->cpp, latency, wp->dbuf_block_size);
4798 method2 = skl_wm_method2(wp->plane_pixel_rate,
4799 cstate->base.adjusted_mode.crtc_htotal,
4801 wp->plane_blocks_per_line);
4804 selected_result = max_fixed16(method2, wp->y_tile_minimum);
4806 if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
4807 wp->dbuf_block_size < 1) &&
4808 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
4809 selected_result = method2;
4810 } else if (latency >= wp->linetime_us) {
4811 if (IS_GEN(dev_priv, 9) &&
4812 !IS_GEMINILAKE(dev_priv))
4813 selected_result = min_fixed16(method1, method2);
4815 selected_result = method2;
4817 selected_result = method1;
4821 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
4822 res_lines = div_round_up_fixed16(selected_result,
4823 wp->plane_blocks_per_line);
4825 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
4826 /* Display WA #1125: skl,bxt,kbl */
4827 if (level == 0 && wp->rc_surface)
4829 fixed16_to_u32_round_up(wp->y_tile_minimum);
4831 /* Display WA #1126: skl,bxt,kbl */
4832 if (level >= 1 && level <= 7) {
4835 fixed16_to_u32_round_up(wp->y_tile_minimum);
4836 res_lines += wp->y_min_scanlines;
4841 /*
4842 * Make sure result blocks for higher latency levels are
4843 * at least as high as the level below the current level.
4844 * This is an assumption in the DDB algorithm optimization
4845 * for special cases. Also covers Display WA #1125 for RC.
4846 */
4847 if (result_prev->plane_res_b > res_blocks)
4848 res_blocks = result_prev->plane_res_b;
4852 if (INTEL_GEN(dev_priv) >= 11) {
4856 if (res_lines % wp->y_min_scanlines == 0)
4857 extra_lines = wp->y_min_scanlines;
4858 else
4859 extra_lines = wp->y_min_scanlines * 2 -
4860 res_lines % wp->y_min_scanlines;
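/*
* e.g. res_lines == 10 with y_min_scanlines == 4 gives
* extra_lines == 2 * 4 - 10 % 4 == 6, padding the
* allocation to res_lines + extra_lines == 16 lines.
*/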
4862 min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
4863 wp->plane_blocks_per_line);
4865 min_ddb_alloc = res_blocks +
4866 DIV_ROUND_UP(res_blocks, 10);
4870 if (!skl_wm_has_lines(dev_priv, level))
4873 if (res_lines > 31) {
4874 /* reject it */
4875 result->min_ddb_alloc = U16_MAX;
4876 return;
4877 }
4879 /*
4880 * If res_lines is valid, assume we can use this watermark level
4881 * for now. We'll come back and disable it after we calculate the
4882 * DDB allocation if it turns out we don't actually have enough
4883 * blocks to satisfy it.
4885 result->plane_res_b = res_blocks;
4886 result->plane_res_l = res_lines;
4887 /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
4888 result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
4889 result->plane_en = true;
4893 skl_compute_wm_levels(const struct intel_crtc_state *cstate,
4894 const struct skl_wm_params *wm_params,
4895 struct skl_wm_level *levels)
4897 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4898 int level, max_level = ilk_wm_max_level(dev_priv);
4899 struct skl_wm_level *result_prev = &levels[0];
4901 for (level = 0; level <= max_level; level++) {
4902 struct skl_wm_level *result = &levels[level];
4904 skl_compute_plane_wm(cstate, level, wm_params,
4905 result_prev, result);
4907 result_prev = result;
4912 skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
4914 struct drm_atomic_state *state = cstate->base.state;
4915 struct drm_i915_private *dev_priv = to_i915(state->dev);
4916 uint_fixed_16_16_t linetime_us;
4919 linetime_us = intel_get_linetime_us(cstate);
4920 linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
4922 /* Display WA #1135: BXT:ALL GLK:ALL */
4923 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
4929 static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
4930 const struct skl_wm_params *wp,
4931 struct skl_plane_wm *wm)
4933 struct drm_device *dev = cstate->base.crtc->dev;
4934 const struct drm_i915_private *dev_priv = to_i915(dev);
4935 u16 trans_min, trans_y_tile_min;
4936 const u16 trans_amount = 10; /* This is a configurable amount */
4937 u16 wm0_sel_res_b, trans_offset_b, res_blocks;
4939 /* Transition WMs are not recommended by the HW team for GEN9 */
4940 if (INTEL_GEN(dev_priv) <= 9)
4941 return;
4943 /* Transition WMs don't make any sense if IPC is disabled */
4944 if (!dev_priv->ipc_enabled)
4945 return;
4947 trans_min = 14;
4948 if (INTEL_GEN(dev_priv) >= 11)
4949 trans_min = 4;
4951 trans_offset_b = trans_min + trans_amount;
4954 * The spec asks for Selected Result Blocks for wm0 (the real value),
4955 * not Result Blocks (the integer value). Pay attention to the capital
4956 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
4957 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
4958 * and since we later will have to get the ceiling of the sum in the
4959 * transition watermarks calculation, we can just pretend Selected
4960 * Result Blocks is Result Blocks minus 1 and it should work for the
4961 * current platforms.
4962 */
4963 wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
4967 (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
4968 res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
4971 res_blocks = wm0_sel_res_b + trans_offset_b;
4973 /* WA BUG:1938466 add one block for non-Y-tiled planes */
4974 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
4980 * Just assume we can enable the transition watermark. After
4981 * computing the DDB we'll come back and disable it if that
4982 * assumption turns out to be false.
4984 wm->trans_wm.plane_res_b = res_blocks + 1;
4985 wm->trans_wm.plane_en = true;
4988 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
4989 const struct intel_plane_state *plane_state,
4990 enum plane_id plane_id, int color_plane)
4992 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4993 struct skl_wm_params wm_params;
4996 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
4997 &wm_params, color_plane);
5001 skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
5002 skl_compute_transition_wm(crtc_state, &wm_params, wm);
5007 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5008 const struct intel_plane_state *plane_state,
5009 enum plane_id plane_id)
5011 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
5012 struct skl_wm_params wm_params;
5015 wm->is_planar = true;
5017 /* UV plane watermarks must also be validated for NV12/planar formats */
5018 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5023 skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
5028 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
5029 const struct intel_plane_state *plane_state)
5031 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
5032 const struct drm_framebuffer *fb = plane_state->base.fb;
5033 enum plane_id plane_id = plane->id;
5036 if (!intel_wm_plane_visible(crtc_state, plane_state))
5039 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5044 if (fb->format->is_yuv && fb->format->num_planes > 1) {
5045 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
5054 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
5055 const struct intel_plane_state *plane_state)
5057 enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
5060 /* Watermarks calculated in master */
5061 if (plane_state->slave)
5064 if (plane_state->linked_plane) {
5065 const struct drm_framebuffer *fb = plane_state->base.fb;
5066 enum plane_id y_plane_id = plane_state->linked_plane->id;
5068 WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
5069 WARN_ON(!fb->format->is_yuv ||
5070 fb->format->num_planes == 1);
5072 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5077 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5081 } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
5082 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5091 static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
5093 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5094 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
5095 struct drm_crtc_state *crtc_state = &cstate->base;
5096 struct drm_plane *plane;
5097 const struct drm_plane_state *pstate;
5101 * We'll only calculate watermarks for planes that are actually
5102 * enabled, so make sure all other planes are set as disabled.
5104 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
5106 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
5107 const struct intel_plane_state *intel_pstate =
5108 to_intel_plane_state(pstate);
5110 if (INTEL_GEN(dev_priv) >= 11)
5111 ret = icl_build_plane_wm(cstate, intel_pstate);
5113 ret = skl_build_plane_wm(cstate, intel_pstate);
5118 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
5123 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5124 i915_reg_t reg,
5125 const struct skl_ddb_entry *entry)
5126 {
5127 if (entry->end)
5128 I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
5129 else
5130 I915_WRITE_FW(reg, 0);
5131 }
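/*
* The -1 above mirrors the +1 applied in skl_ddb_entry_init_from_hw():
* software entries use an exclusive end, while the register encodes an
* inclusive end in its high half.
*/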
5133 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5135 const struct skl_wm_level *level)
5139 if (level->plane_en)
5141 if (level->ignore_lines)
5142 val |= PLANE_WM_IGNORE_LINES;
5143 val |= level->plane_res_b;
5144 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
5146 I915_WRITE_FW(reg, val);
5149 void skl_write_plane_wm(struct intel_plane *plane,
5150 const struct intel_crtc_state *crtc_state)
5152 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5153 int level, max_level = ilk_wm_max_level(dev_priv);
5154 enum plane_id plane_id = plane->id;
5155 enum pipe pipe = plane->pipe;
5156 const struct skl_plane_wm *wm =
5157 &crtc_state->wm.skl.optimal.planes[plane_id];
5158 const struct skl_ddb_entry *ddb_y =
5159 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5160 const struct skl_ddb_entry *ddb_uv =
5161 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
5163 for (level = 0; level <= max_level; level++) {
5164 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
5167 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5170 if (INTEL_GEN(dev_priv) >= 11) {
5171 skl_ddb_entry_write(dev_priv,
5172 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5177 swap(ddb_y, ddb_uv);
5179 skl_ddb_entry_write(dev_priv,
5180 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5181 skl_ddb_entry_write(dev_priv,
5182 PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
5185 void skl_write_cursor_wm(struct intel_plane *plane,
5186 const struct intel_crtc_state *crtc_state)
5188 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5189 int level, max_level = ilk_wm_max_level(dev_priv);
5190 enum plane_id plane_id = plane->id;
5191 enum pipe pipe = plane->pipe;
5192 const struct skl_plane_wm *wm =
5193 &crtc_state->wm.skl.optimal.planes[plane_id];
5194 const struct skl_ddb_entry *ddb =
5195 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5197 for (level = 0; level <= max_level; level++) {
5198 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5201 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
5203 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
5206 bool skl_wm_level_equals(const struct skl_wm_level *l1,
5207 const struct skl_wm_level *l2)
5209 return l1->plane_en == l2->plane_en &&
5210 l1->ignore_lines == l2->ignore_lines &&
5211 l1->plane_res_l == l2->plane_res_l &&
5212 l1->plane_res_b == l2->plane_res_b;
5215 static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5216 const struct skl_plane_wm *wm1,
5217 const struct skl_plane_wm *wm2)
5219 int level, max_level = ilk_wm_max_level(dev_priv);
5221 for (level = 0; level <= max_level; level++) {
5222 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
5223 !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
5227 return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
5230 static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
5231 const struct skl_pipe_wm *wm1,
5232 const struct skl_pipe_wm *wm2)
5234 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5235 enum plane_id plane_id;
5237 for_each_plane_id_on_crtc(crtc, plane_id) {
5238 if (!skl_plane_wm_equals(dev_priv,
5239 &wm1->planes[plane_id],
5240 &wm2->planes[plane_id]))
5244 return wm1->linetime == wm2->linetime;
5247 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5248 const struct skl_ddb_entry *b)
5250 return a->start < b->end && b->start < a->end;
5251 }
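/*
* Half-open entries only collide when each one starts before the other
* ends; e.g. [0, 100) and [100, 200) do not overlap, while [0, 101)
* and [100, 200) do.
*/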
5253 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5254 const struct skl_ddb_entry *entries,
5255 int num_entries, int ignore_idx)
5259 for (i = 0; i < num_entries; i++) {
5260 if (i != ignore_idx &&
5261 skl_ddb_entries_overlap(ddb, &entries[i]))
5269 pipes_modified(struct intel_atomic_state *state)
5271 struct intel_crtc *crtc;
5272 struct intel_crtc_state *cstate;
5275 for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
5276 ret |= drm_crtc_mask(&crtc->base);
5282 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
5283 struct intel_crtc_state *new_crtc_state)
5285 struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
5286 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5287 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5288 struct intel_plane *plane;
5290 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5291 struct intel_plane_state *plane_state;
5292 enum plane_id plane_id = plane->id;
5294 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
5295 &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
5296 skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
5297 &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
5300 plane_state = intel_atomic_get_plane_state(state, plane);
5301 if (IS_ERR(plane_state))
5302 return PTR_ERR(plane_state);
5304 new_crtc_state->update_planes |= BIT(plane_id);
5311 skl_compute_ddb(struct intel_atomic_state *state)
5313 const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5314 struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
5315 struct intel_crtc_state *old_crtc_state;
5316 struct intel_crtc_state *new_crtc_state;
5317 struct intel_crtc *crtc;
5320 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
5322 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5323 new_crtc_state, i) {
5324 ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
5328 ret = skl_ddb_add_affected_planes(old_crtc_state,
5337 static char enast(bool enable)
5339 return enable ? '*' : ' ';
5343 skl_print_wm_changes(struct intel_atomic_state *state)
5345 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5346 const struct intel_crtc_state *old_crtc_state;
5347 const struct intel_crtc_state *new_crtc_state;
5348 struct intel_plane *plane;
5349 struct intel_crtc *crtc;
5352 if ((drm_debug & DRM_UT_KMS) == 0)
5355 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5356 new_crtc_state, i) {
5357 const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
5359 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
5360 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
5362 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5363 enum plane_id plane_id = plane->id;
5364 const struct skl_ddb_entry *old, *new;
5366 old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
5367 new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
5369 if (skl_ddb_entry_equal(old, new))
5372 DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
5373 plane->base.base.id, plane->base.name,
5374 old->start, old->end, new->start, new->end,
5375 skl_ddb_entry_size(old), skl_ddb_entry_size(new));
5378 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5379 enum plane_id plane_id = plane->id;
5380 const struct skl_plane_wm *old_wm, *new_wm;
5382 old_wm = &old_pipe_wm->planes[plane_id];
5383 new_wm = &new_pipe_wm->planes[plane_id];
5385 if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
5388 DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
5389 " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
5390 plane->base.base.id, plane->base.name,
5391 enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
5392 enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
5393 enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
5394 enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
5395 enast(old_wm->trans_wm.plane_en),
5396 enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
5397 enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
5398 enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
5399 enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
5400 enast(new_wm->trans_wm.plane_en));
5402 DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
5403 " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
5404 plane->base.base.id, plane->base.name,
5405 enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
5406 enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
5407 enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
5408 enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
5409 enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
5410 enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
5411 enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
5412 enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
5413 enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
5415 enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
5416 enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
5417 enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
5418 enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
5419 enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
5420 enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
5421 enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
5422 enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
5423 enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
5425 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5426 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5427 plane->base.base.id, plane->base.name,
5428 old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
5429 old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
5430 old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
5431 old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
5432 old_wm->trans_wm.plane_res_b,
5433 new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
5434 new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
5435 new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
5436 new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
5437 new_wm->trans_wm.plane_res_b);
5439 DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5440 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5441 plane->base.base.id, plane->base.name,
5442 old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
5443 old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
5444 old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
5445 old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
5446 old_wm->trans_wm.min_ddb_alloc,
5447 new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
5448 new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
5449 new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
5450 new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
5451 new_wm->trans_wm.min_ddb_alloc);
5457 skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
5459 struct drm_device *dev = state->base.dev;
5460 const struct drm_i915_private *dev_priv = to_i915(dev);
5461 struct intel_crtc *crtc;
5462 struct intel_crtc_state *crtc_state;
5463 u32 realloc_pipes = pipes_modified(state);
5467 * When we distrust bios wm we always need to recompute to set the
5468 * expected DDB allocations for each CRTC.
5469 */
5470 if (dev_priv->wm.distrust_bios_wm)
5471 *changed = true;
5474 * If this transaction isn't actually touching any CRTC's, don't
5475 * bother with watermark calculation. Note that if we pass this
5476 * test, we're guaranteed to hold at least one CRTC state mutex,
5477 * which means we can safely use values like dev_priv->active_crtcs
5478 * since any racing commits that want to update them would need to
5479 * hold _all_ CRTC state mutexes.
5481 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
5488 * If this is our first atomic update following hardware readout,
5489 * we can't trust the DDB that the BIOS programmed for us. Let's
5490 * pretend that all pipes switched active status so that we'll
5491 * ensure a full DDB recompute.
5493 if (dev_priv->wm.distrust_bios_wm) {
5494 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
5495 state->base.acquire_ctx);
5499 state->active_pipe_changes = ~0;
5502 * We usually only initialize state->active_crtcs if we're
5503 * doing a modeset; make sure this field is always
5504 * initialized during the sanitization process that happens
5505 * on the first commit too.
5507 if (!state->modeset)
5508 state->active_crtcs = dev_priv->active_crtcs;
5512 * If the modeset changes which CRTC's are active, we need to
5513 * recompute the DDB allocation for *all* active pipes, even
5514 * those that weren't otherwise being modified in any way by this
5515 * atomic commit. Due to the shrinking of the per-pipe allocations
5516 * when new active CRTC's are added, it's possible for a pipe that
5517 * we were already using and aren't changing at all here to suddenly
5518 * become invalid if its DDB needs exceeds its new allocation.
5520 * Note that if we wind up doing a full DDB recompute, we can't let
5521 * any other display updates race with this transaction, so we need
5522 * to grab the lock on *all* CRTC's.
5524 if (state->active_pipe_changes || state->modeset) {
5526 state->wm_results.dirty_pipes = ~0;
5530 * We're not recomputing for the pipes not included in the commit, so
5531 * make sure we start with the current state.
5533 for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
5534 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5535 if (IS_ERR(crtc_state))
5536 return PTR_ERR(crtc_state);
5543 * To make sure the cursor watermark registers are always consistent
5544 * with our computed state the following scenario needs special
5545 * treatment:
5546 *
5547 * 1. enable cursor
5548 * 2. move cursor entirely offscreen
5549 * 3. disable cursor
5550 *
5551 * Step 2. does call .disable_plane() but does not zero the watermarks
5552 * (since we consider an offscreen cursor still active for the purposes
5553 * of watermarks). Step 3. would not normally call .disable_plane()
5554 * because the actual plane visibility isn't changing, and we don't
5555 * deallocate the cursor ddb until the pipe gets disabled. So we must
5556 * force step 3. to call .disable_plane() to update the watermark
5557 * registers properly.
5559 * Other planes do not suffer from this issue as their watermarks are
5560 * calculated based on the actual plane visibility. The only time this
5561 * can trigger for the other planes is during the initial readout as the
5562 * default value of the watermarks registers is not zero.
5563 */
5564 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
5565 struct intel_crtc *crtc)
5567 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5568 const struct intel_crtc_state *old_crtc_state =
5569 intel_atomic_get_old_crtc_state(state, crtc);
5570 struct intel_crtc_state *new_crtc_state =
5571 intel_atomic_get_new_crtc_state(state, crtc);
5572 struct intel_plane *plane;
5574 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5575 struct intel_plane_state *plane_state;
5576 enum plane_id plane_id = plane->id;
5579 * Force a full wm update for every plane on modeset.
5580 * Required because the reset value of the wm registers
5581 * is non-zero, whereas we want all disabled planes to
5582 * have zero watermarks. So if we turn off the relevant
5583 * power well the hardware state will go out of sync
5584 * with the software state.
5586 if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
5587 skl_plane_wm_equals(dev_priv,
5588 &old_crtc_state->wm.skl.optimal.planes[plane_id],
5589 &new_crtc_state->wm.skl.optimal.planes[plane_id]))
5592 plane_state = intel_atomic_get_plane_state(state, plane);
5593 if (IS_ERR(plane_state))
5594 return PTR_ERR(plane_state);
5596 new_crtc_state->update_planes |= BIT(plane_id);
5603 skl_compute_wm(struct intel_atomic_state *state)
5605 struct intel_crtc *crtc;
5606 struct intel_crtc_state *new_crtc_state;
5607 struct intel_crtc_state *old_crtc_state;
5608 struct skl_ddb_values *results = &state->wm_results;
5609 bool changed = false;
5612 /* Clear all dirty flags */
5613 results->dirty_pipes = 0;
5615 ret = skl_ddb_add_affected_pipes(state, &changed);
5616 if (ret || !changed)
5617 return ret;
5620 * Calculate WMs for all pipes that are part of this transaction.
5621 * Note that skl_ddb_add_affected_pipes may have added more CRTCs to
5622 * our state that weren't otherwise being modified (and set bits in
5623 * dirty_pipes) if pipe allocations had to change.
5624 */
5625 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5626 new_crtc_state, i) {
5627 ret = skl_build_pipe_wm(new_crtc_state);
5628 if (ret)
5629 return ret;
5631 ret = skl_wm_add_affected_planes(state, crtc);
5632 if (ret)
5633 return ret;
5635 if (!skl_pipe_wm_equals(crtc,
5636 &old_crtc_state->wm.skl.optimal,
5637 &new_crtc_state->wm.skl.optimal))
5638 results->dirty_pipes |= drm_crtc_mask(&crtc->base);
5641 ret = skl_compute_ddb(state);
5642 if (ret)
5643 return ret;
5645 skl_print_wm_changes(state);
5647 return 0;
5648 }
5650 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
5651 struct intel_crtc_state *cstate)
5653 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
5654 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5655 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
5656 enum pipe pipe = crtc->pipe;
5658 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
5661 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
5664 static void skl_initial_wm(struct intel_atomic_state *state,
5665 struct intel_crtc_state *cstate)
5667 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5668 struct drm_device *dev = intel_crtc->base.dev;
5669 struct drm_i915_private *dev_priv = to_i915(dev);
5670 struct skl_ddb_values *results = &state->wm_results;
5672 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
5673 return;
5675 mutex_lock(&dev_priv->wm.wm_mutex);
5677 if (cstate->base.active_changed)
5678 skl_atomic_update_crtc_wm(state, cstate);
5680 mutex_unlock(&dev_priv->wm.wm_mutex);
5683 static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
5684 struct intel_wm_config *config)
5686 struct intel_crtc *crtc;
5688 /* Compute the currently _active_ config */
5689 for_each_intel_crtc(&dev_priv->drm, crtc) {
5690 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5692 if (!wm->pipe_enabled)
5695 config->sprites_enabled |= wm->sprites_enabled;
5696 config->sprites_scaled |= wm->sprites_scaled;
5697 config->num_pipes_active++;
5701 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
5703 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
5704 struct ilk_wm_maximums max;
5705 struct intel_wm_config config = {};
5706 struct ilk_wm_values results = {};
5707 enum intel_ddb_partitioning partitioning;
5709 ilk_compute_wm_config(dev_priv, &config);
5711 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
5712 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
5714 /* 5/6 split only in single pipe config on IVB+ */
5715 if (INTEL_GEN(dev_priv) >= 7 &&
5716 config.num_pipes_active == 1 && config.sprites_enabled) {
5717 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
5718 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
5720 best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
5722 best_lp_wm = &lp_wm_1_2;
5725 partitioning = (best_lp_wm == &lp_wm_1_2) ?
5726 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
5728 ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
5730 ilk_write_wm_values(dev_priv, &results);
5733 static void ilk_initial_watermarks(struct intel_atomic_state *state,
5734 struct intel_crtc_state *cstate)
5736 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5737 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5739 mutex_lock(&dev_priv->wm.wm_mutex);
5740 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
5741 ilk_program_watermarks(dev_priv);
5742 mutex_unlock(&dev_priv->wm.wm_mutex);
5745 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
5746 struct intel_crtc_state *cstate)
5748 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5749 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5751 mutex_lock(&dev_priv->wm.wm_mutex);
5752 if (cstate->wm.need_postvbl_update) {
5753 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
5754 ilk_program_watermarks(dev_priv);
5756 mutex_unlock(&dev_priv->wm.wm_mutex);
5759 static inline void skl_wm_level_from_reg_val(u32 val,
5760 struct skl_wm_level *level)
5762 level->plane_en = val & PLANE_WM_EN;
5763 level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
5764 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
5765 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
5766 PLANE_WM_LINES_MASK;
5769 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
5770 struct skl_pipe_wm *out)
5772 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5773 enum pipe pipe = crtc->pipe;
5774 int level, max_level;
5775 enum plane_id plane_id;
5778 max_level = ilk_wm_max_level(dev_priv);
5780 for_each_plane_id_on_crtc(crtc, plane_id) {
5781 struct skl_plane_wm *wm = &out->planes[plane_id];
5783 for (level = 0; level <= max_level; level++) {
5784 if (plane_id != PLANE_CURSOR)
5785 val = I915_READ(PLANE_WM(pipe, plane_id, level));
5787 val = I915_READ(CUR_WM(pipe, level));
5789 skl_wm_level_from_reg_val(val, &wm->wm[level]);
5792 if (plane_id != PLANE_CURSOR)
5793 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
5795 val = I915_READ(CUR_WM_TRANS(pipe));
5797 skl_wm_level_from_reg_val(val, &wm->trans_wm);
5803 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
5806 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
5808 struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
5809 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
5810 struct intel_crtc *crtc;
5811 struct intel_crtc_state *cstate;
5813 skl_ddb_get_hw_state(dev_priv, ddb);
5814 for_each_intel_crtc(&dev_priv->drm, crtc) {
5815 cstate = to_intel_crtc_state(crtc->base.state);
5817 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
5820 hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
5823 if (dev_priv->active_crtcs) {
5824 /* Fully recompute DDB on first atomic commit */
5825 dev_priv->wm.distrust_bios_wm = true;
5829 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
5831 struct drm_device *dev = crtc->base.dev;
5832 struct drm_i915_private *dev_priv = to_i915(dev);
5833 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5834 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
5835 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
5836 enum pipe pipe = crtc->pipe;
5837 static const i915_reg_t wm0_pipe_reg[] = {
5838 [PIPE_A] = WM0_PIPEA_ILK,
5839 [PIPE_B] = WM0_PIPEB_ILK,
5840 [PIPE_C] = WM0_PIPEC_IVB,
5843 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
5844 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5845 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
5847 memset(active, 0, sizeof(*active));
5849 active->pipe_enabled = crtc->active;
5851 if (active->pipe_enabled) {
5852 u32 tmp = hw->wm_pipe[pipe];
5855 * For active pipes LP0 watermark is marked as
5856 * enabled, and LP1+ watermarks as disabled since
5857 * we can't really reverse compute them in case
5858 * multiple pipes are active.
5860 active->wm[0].enable = true;
5861 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
5862 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
5863 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
5864 active->linetime = hw->wm_linetime[pipe];
5866 int level, max_level = ilk_wm_max_level(dev_priv);
5869 * For inactive pipes, all watermark levels
5870 * should be marked as enabled but zeroed,
5871 * which is what we'd compute them to.
5873 for (level = 0; level <= max_level; level++)
5874 active->wm[level].enable = true;
5877 crtc->wm.active.ilk = *active;
5880 #define _FW_WM(value, plane) \
5881 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
5882 #define _FW_WM_VLV(value, plane) \
5883 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
5885 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
5886 struct g4x_wm_values *wm)
5890 tmp = I915_READ(DSPFW1);
5891 wm->sr.plane = _FW_WM(tmp, SR);
5892 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5893 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
5894 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
5896 tmp = I915_READ(DSPFW2);
5897 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
5898 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
5899 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
5900 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
5901 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5902 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
5904 tmp = I915_READ(DSPFW3);
5905 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
5906 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5907 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
5908 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
5911 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
5912 struct vlv_wm_values *wm)
5917 for_each_pipe(dev_priv, pipe) {
5918 tmp = I915_READ(VLV_DDL(pipe));
5920 wm->ddl[pipe].plane[PLANE_PRIMARY] =
5921 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5922 wm->ddl[pipe].plane[PLANE_CURSOR] =
5923 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5924 wm->ddl[pipe].plane[PLANE_SPRITE0] =
5925 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5926 wm->ddl[pipe].plane[PLANE_SPRITE1] =
5927 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5930 tmp = I915_READ(DSPFW1);
5931 wm->sr.plane = _FW_WM(tmp, SR);
5932 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5933 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
5934 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
5936 tmp = I915_READ(DSPFW2);
5937 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
5938 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5939 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
5941 tmp = I915_READ(DSPFW3);
5942 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5944 if (IS_CHERRYVIEW(dev_priv)) {
5945 tmp = I915_READ(DSPFW7_CHV);
5946 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5947 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5949 tmp = I915_READ(DSPFW8_CHV);
5950 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
5951 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
5953 tmp = I915_READ(DSPFW9_CHV);
5954 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
5955 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
5957 tmp = I915_READ(DSPHOWM);
5958 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5959 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
5960 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
5961 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
5962 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5963 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5964 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5965 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5966 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5967 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5969 tmp = I915_READ(DSPFW7);
5970 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5971 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5973 tmp = I915_READ(DSPHOWM);
5974 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5975 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5976 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5977 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5978 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5979 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5980 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5987 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
5989 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
5990 struct intel_crtc *crtc;
5992 g4x_read_wm_values(dev_priv, wm);
5994 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
5996 for_each_intel_crtc(&dev_priv->drm, crtc) {
5997 struct intel_crtc_state *crtc_state =
5998 to_intel_crtc_state(crtc->base.state);
5999 struct g4x_wm_state *active = &crtc->wm.active.g4x;
6000 struct g4x_pipe_wm *raw;
6001 enum pipe pipe = crtc->pipe;
6002 enum plane_id plane_id;
6003 int level, max_level;
6005 active->cxsr = wm->cxsr;
6006 active->hpll_en = wm->hpll_en;
6007 active->fbc_en = wm->fbc_en;
6009 active->sr = wm->sr;
6010 active->hpll = wm->hpll;
6012 for_each_plane_id_on_crtc(crtc, plane_id) {
6013 active->wm.plane[plane_id] =
6014 wm->pipe[pipe].plane[plane_id];
6015 }
6017 if (wm->cxsr && wm->hpll_en)
6018 max_level = G4X_WM_LEVEL_HPLL;
6019 else if (wm->cxsr)
6020 max_level = G4X_WM_LEVEL_SR;
6021 else
6022 max_level = G4X_WM_LEVEL_NORMAL;
6024 level = G4X_WM_LEVEL_NORMAL;
6025 raw = &crtc_state->wm.g4x.raw[level];
6026 for_each_plane_id_on_crtc(crtc, plane_id)
6027 raw->plane[plane_id] = active->wm.plane[plane_id];
6029 if (++level > max_level)
6030 goto out;
6032 raw = &crtc_state->wm.g4x.raw[level];
6033 raw->plane[PLANE_PRIMARY] = active->sr.plane;
6034 raw->plane[PLANE_CURSOR] = active->sr.cursor;
6035 raw->plane[PLANE_SPRITE0] = 0;
6036 raw->fbc = active->sr.fbc;
6038 if (++level > max_level)
6039 goto out;
6041 raw = &crtc_state->wm.g4x.raw[level];
6042 raw->plane[PLANE_PRIMARY] = active->hpll.plane;
6043 raw->plane[PLANE_CURSOR] = active->hpll.cursor;
6044 raw->plane[PLANE_SPRITE0] = 0;
6045 raw->fbc = active->hpll.fbc;
6047 out:
6048 for_each_plane_id_on_crtc(crtc, plane_id)
6049 g4x_raw_plane_wm_set(crtc_state, level,
6050 plane_id, USHRT_MAX);
6051 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
6053 crtc_state->wm.g4x.optimal = *active;
6054 crtc_state->wm.g4x.intermediate = *active;
6056 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
6057 pipe_name(pipe),
6058 wm->pipe[pipe].plane[PLANE_PRIMARY],
6059 wm->pipe[pipe].plane[PLANE_CURSOR],
6060 wm->pipe[pipe].plane[PLANE_SPRITE0]);
6061 }
6063 DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
6064 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
6065 DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
6066 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
6067 DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
6068 yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
6069 }
6071 void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
6072 {
6073 struct intel_plane *plane;
6074 struct intel_crtc *crtc;
6076 mutex_lock(&dev_priv->wm.wm_mutex);
6078 for_each_intel_plane(&dev_priv->drm, plane) {
6079 struct intel_crtc *crtc =
6080 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
6081 struct intel_crtc_state *crtc_state =
6082 to_intel_crtc_state(crtc->base.state);
6083 struct intel_plane_state *plane_state =
6084 to_intel_plane_state(plane->base.state);
6085 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
6086 enum plane_id plane_id = plane->id;
6087 int level;
6089 if (plane_state->base.visible)
6090 continue;
6092 for (level = 0; level < 3; level++) {
6093 struct g4x_pipe_wm *raw =
6094 &crtc_state->wm.g4x.raw[level];
6096 raw->plane[plane_id] = 0;
6097 wm_state->wm.plane[plane_id] = 0;
6098 }
6100 if (plane_id == PLANE_PRIMARY) {
6101 for (level = 0; level < 3; level++) {
6102 struct g4x_pipe_wm *raw =
6103 &crtc_state->wm.g4x.raw[level];
6105 raw->fbc = 0;
6106 }
6107 wm_state->sr.fbc = 0;
6108 wm_state->hpll.fbc = 0;
6109 wm_state->fbc_en = false;
6110 }
6111 }
6113 for_each_intel_crtc(&dev_priv->drm, crtc) {
6114 struct intel_crtc_state *crtc_state =
6115 to_intel_crtc_state(crtc->base.state);
6117 crtc_state->wm.g4x.intermediate =
6118 crtc_state->wm.g4x.optimal;
6119 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
6120 }
6122 g4x_program_watermarks(dev_priv);
6124 mutex_unlock(&dev_priv->wm.wm_mutex);
6125 }
6127 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
6128 {
6129 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
6130 struct intel_crtc *crtc;
6131 u32 val;
6133 vlv_read_wm_values(dev_priv, wm);
6135 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
6136 wm->level = VLV_WM_LEVEL_PM2;
6138 if (IS_CHERRYVIEW(dev_priv)) {
6139 vlv_punit_get(dev_priv);
6141 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
6142 if (val & DSP_MAXFIFO_PM5_ENABLE)
6143 wm->level = VLV_WM_LEVEL_PM5;
6145 /*
6146 * If DDR DVFS is disabled in the BIOS, Punit
6147 * will never ack the request. So if that happens
6148 * assume we don't have to enable/disable DDR DVFS
6149 * dynamically. To test that just set the REQ_ACK
6150 * bit to poke the Punit, but don't change the
6151 * HIGH/LOW bits so that we don't actually change
6152 * the current state.
6153 */
6154 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
6155 val |= FORCE_DDR_FREQ_REQ_ACK;
6156 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
6158 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
6159 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
6160 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
6161 "assuming DDR DVFS is disabled\n");
6162 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
6163 } else {
6164 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
6165 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
6166 wm->level = VLV_WM_LEVEL_DDR_DVFS;
6167 }
6169 vlv_punit_put(dev_priv);
6170 }
6172 for_each_intel_crtc(&dev_priv->drm, crtc) {
6173 struct intel_crtc_state *crtc_state =
6174 to_intel_crtc_state(crtc->base.state);
6175 struct vlv_wm_state *active = &crtc->wm.active.vlv;
6176 const struct vlv_fifo_state *fifo_state =
6177 &crtc_state->wm.vlv.fifo_state;
6178 enum pipe pipe = crtc->pipe;
6179 enum plane_id plane_id;
6180 int level;
6182 vlv_get_fifo_size(crtc_state);
6184 active->num_levels = wm->level + 1;
6185 active->cxsr = wm->cxsr;
6187 for (level = 0; level < active->num_levels; level++) {
6188 struct g4x_pipe_wm *raw =
6189 &crtc_state->wm.vlv.raw[level];
6191 active->sr[level].plane = wm->sr.plane;
6192 active->sr[level].cursor = wm->sr.cursor;
6194 for_each_plane_id_on_crtc(crtc, plane_id) {
6195 active->wm[level].plane[plane_id] =
6196 wm->pipe[pipe].plane[plane_id];
6198 raw->plane[plane_id] =
6199 vlv_invert_wm_value(active->wm[level].plane[plane_id],
6200 fifo_state->plane[plane_id]);
6201 }
6202 }
6204 for_each_plane_id_on_crtc(crtc, plane_id)
6205 vlv_raw_plane_wm_set(crtc_state, level,
6206 plane_id, USHRT_MAX);
6207 vlv_invalidate_wms(crtc, active, level);
6209 crtc_state->wm.vlv.optimal = *active;
6210 crtc_state->wm.vlv.intermediate = *active;
6212 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
6214 wm->pipe[pipe].plane[PLANE_PRIMARY],
6215 wm->pipe[pipe].plane[PLANE_CURSOR],
6216 wm->pipe[pipe].plane[PLANE_SPRITE0],
6217 wm->pipe[pipe].plane[PLANE_SPRITE1]);
6218 }
6220 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
6221 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
6222 }
6224 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
6225 {
6226 struct intel_plane *plane;
6227 struct intel_crtc *crtc;
6229 mutex_lock(&dev_priv->wm.wm_mutex);
6231 for_each_intel_plane(&dev_priv->drm, plane) {
6232 struct intel_crtc *crtc =
6233 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
6234 struct intel_crtc_state *crtc_state =
6235 to_intel_crtc_state(crtc->base.state);
6236 struct intel_plane_state *plane_state =
6237 to_intel_plane_state(plane->base.state);
6238 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
6239 const struct vlv_fifo_state *fifo_state =
6240 &crtc_state->wm.vlv.fifo_state;
6241 enum plane_id plane_id = plane->id;
6242 int level;
6244 if (plane_state->base.visible)
6245 continue;
6247 for (level = 0; level < wm_state->num_levels; level++) {
6248 struct g4x_pipe_wm *raw =
6249 &crtc_state->wm.vlv.raw[level];
6251 raw->plane[plane_id] = 0;
6253 wm_state->wm[level].plane[plane_id] =
6254 vlv_invert_wm_value(raw->plane[plane_id],
6255 fifo_state->plane[plane_id]);
6256 }
6257 }
6259 for_each_intel_crtc(&dev_priv->drm, crtc) {
6260 struct intel_crtc_state *crtc_state =
6261 to_intel_crtc_state(crtc->base.state);
6263 crtc_state->wm.vlv.intermediate =
6264 crtc_state->wm.vlv.optimal;
6265 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
6266 }
6268 vlv_program_watermarks(dev_priv);
6270 mutex_unlock(&dev_priv->wm.wm_mutex);
6271 }
6273 /*
6274 * FIXME should probably kill this and improve
6275 * the real watermark readout/sanitation instead
6276 */
6277 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
6278 {
6279 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6280 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6281 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6283 /*
6284 * Don't touch WM1S_LP_EN here.
6285 * Doing so could cause underruns.
6286 */
6287 }
6289 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
6290 {
6291 struct ilk_wm_values *hw = &dev_priv->wm.hw;
6292 struct intel_crtc *crtc;
6294 ilk_init_lp_watermarks(dev_priv);
6296 for_each_intel_crtc(&dev_priv->drm, crtc)
6297 ilk_pipe_wm_get_hw_state(crtc);
6299 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
6300 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
6301 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
6303 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
6304 if (INTEL_GEN(dev_priv) >= 7) {
6305 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
6306 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
6307 }
6309 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6310 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
6311 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
6312 else if (IS_IVYBRIDGE(dev_priv))
6313 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
6314 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
6316 hw->enable_fbc_wm =
6317 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
6318 }
6320 /**
6321 * intel_update_watermarks - update FIFO watermark values based on current modes
6322 * @crtc: the #intel_crtc on which to compute the WM
6324 * Calculate watermark values for the various WM regs based on current mode
6325 * and plane configuration.
6327 * There are several cases to deal with here:
6328 * - normal (i.e. non-self-refresh)
6329 * - self-refresh (SR) mode
6330 * - lines are large relative to FIFO size (buffer can hold up to 2)
6331 * - lines are small relative to FIFO size (buffer can hold more than 2
6332 * lines), so need to account for TLB latency
6334 * The normal calculation is:
6335 * watermark = dotclock * bytes per pixel * latency
6336 * where latency is platform & configuration dependent (we assume pessimal
6337 * values).
6339 * The SR calculation is:
6340 * watermark = (trunc(latency/line time)+1) * surface width *
6341 * bytes per pixel
6342 * where
6343 * line time = htotal / dotclock
6344 * surface width = hdisplay for normal plane and 64 for cursor
6345 * and latency is assumed to be high, as above.
6347 * The final value programmed to the register should always be rounded up,
6348 * and include an extra 2 entries to account for clock crossings.
6350 * We don't use the sprite, so we can ignore that. And on Crestline we have
6351 * to set the non-SR watermarks to 8.
6352 */
6353 void intel_update_watermarks(struct intel_crtc *crtc)
6354 {
6355 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6357 if (dev_priv->display.update_wm)
6358 dev_priv->display.update_wm(crtc);
6359 }
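/*
 * A worked example of the normal watermark formula documented above.
 * This is an illustrative sketch only, with assumed numbers (148.5MHz
 * dotclock, 4 bytes per pixel, 12us latency, 64-byte FIFO entries);
 * it is not called by the driver.
 */
static inline unsigned int example_normal_wm_entries(void)
{
	const unsigned int dotclock_khz = 148500;	/* assumed mode clock */
	const unsigned int cpp = 4;			/* bytes per pixel */
	const unsigned int latency_us = 12;		/* assumed worst case */
	const unsigned int entry_size = 64;		/* bytes per FIFO entry */
	unsigned int bytes;

	/* watermark = dotclock * bytes per pixel * latency */
	bytes = dotclock_khz / 1000 * cpp * latency_us; /* 148 * 4 * 12 = 7104 */

	/* round up, plus 2 entries for clock crossings */
	return DIV_ROUND_UP(bytes, entry_size) + 2; /* 111 + 2 = 113 */
}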
6361 void intel_enable_ipc(struct drm_i915_private *dev_priv)
6362 {
6363 u32 val;
6365 if (!HAS_IPC(dev_priv))
6366 return;
6368 val = I915_READ(DISP_ARB_CTL2);
6370 if (dev_priv->ipc_enabled)
6371 val |= DISP_IPC_ENABLE;
6372 else
6373 val &= ~DISP_IPC_ENABLE;
6375 I915_WRITE(DISP_ARB_CTL2, val);
6376 }
6378 static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
6379 {
6380 /* Display WA #0477 WaDisableIPC: skl */
6381 if (IS_SKYLAKE(dev_priv))
6382 return false;
6384 /* Display WA #1141: SKL:all KBL:all CFL */
6385 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
6386 return dev_priv->dram_info.symmetric_memory;
6388 return true;
6389 }
6391 void intel_init_ipc(struct drm_i915_private *dev_priv)
6392 {
6393 if (!HAS_IPC(dev_priv))
6394 return;
6396 dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
6398 intel_enable_ipc(dev_priv);
6399 }
6401 /*
6402 * Lock protecting IPS related data structures
6403 */
6404 DEFINE_SPINLOCK(mchdev_lock);
6406 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
6407 {
6408 u16 rgvswctl;
6410 lockdep_assert_held(&mchdev_lock);
6412 rgvswctl = I915_READ16(MEMSWCTL);
6413 if (rgvswctl & MEMCTL_CMD_STS) {
6414 DRM_DEBUG("gpu busy, RCS change rejected\n");
6415 return false; /* still busy with another command */
6416 }
6418 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
6419 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
6420 I915_WRITE16(MEMSWCTL, rgvswctl);
6421 POSTING_READ16(MEMSWCTL);
6423 rgvswctl |= MEMCTL_CMD_STS;
6424 I915_WRITE16(MEMSWCTL, rgvswctl);
6426 return true;
6427 }
6429 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
6430 {
6431 u32 rgvmodectl;
6432 u8 fmax, fmin, fstart, vstart;
6434 spin_lock_irq(&mchdev_lock);
6436 rgvmodectl = I915_READ(MEMMODECTL);
6438 /* Enable temp reporting */
6439 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
6440 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
6442 /* 100ms RC evaluation intervals */
6443 I915_WRITE(RCUPEI, 100000);
6444 I915_WRITE(RCDNEI, 100000);
6446 /* Set max/min thresholds to 90ms and 80ms respectively */
6447 I915_WRITE(RCBMAXAVG, 90000);
6448 I915_WRITE(RCBMINAVG, 80000);
6450 I915_WRITE(MEMIHYST, 1);
6452 /* Set up min, max, and cur for interrupt handling */
6453 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
6454 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
6455 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
6456 MEMMODE_FSTART_SHIFT;
6458 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
6459 PXVFREQ_PX_SHIFT;
6461 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
6462 dev_priv->ips.fstart = fstart;
6464 dev_priv->ips.max_delay = fstart;
6465 dev_priv->ips.min_delay = fmin;
6466 dev_priv->ips.cur_delay = fstart;
6468 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
6469 fmax, fmin, fstart);
6471 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
6473 /*
6474 * Interrupts will be enabled in ironlake_irq_postinstall
6475 */
6477 I915_WRITE(VIDSTART, vstart);
6478 POSTING_READ(VIDSTART);
6480 rgvmodectl |= MEMMODE_SWMODE_EN;
6481 I915_WRITE(MEMMODECTL, rgvmodectl);
6483 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
6484 DRM_ERROR("stuck trying to change perf mode\n");
6487 ironlake_set_drps(dev_priv, fstart);
6489 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
6490 I915_READ(DDREC) + I915_READ(CSIEC);
6491 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
6492 dev_priv->ips.last_count2 = I915_READ(GFXEC);
6493 dev_priv->ips.last_time2 = ktime_get_raw_ns();
6495 spin_unlock_irq(&mchdev_lock);
6496 }
6498 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
6499 {
6500 u16 rgvswctl;
6502 spin_lock_irq(&mchdev_lock);
6504 rgvswctl = I915_READ16(MEMSWCTL);
6506 /* Ack interrupts, disable EFC interrupt */
6507 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
6508 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
6509 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
6510 I915_WRITE(DEIIR, DE_PCU_EVENT);
6511 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
6513 /* Go back to the starting frequency */
6514 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
6516 rgvswctl |= MEMCTL_CMD_STS;
6517 I915_WRITE(MEMSWCTL, rgvswctl);
6518 mdelay(1);
6520 spin_unlock_irq(&mchdev_lock);
6521 }
6523 /* There's a funny hw issue where the hw returns all 0 when reading from
6524 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
6525 * ourselves, instead of doing a rmw cycle (which might result in us clearing
6526 * all limits and the gpu stuck at whatever frequency it is at atm).
6527 */
6528 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
6529 {
6530 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6531 u32 limits;
6533 /* Only set the down limit when we've reached the lowest level to avoid
6534 * getting more interrupts, otherwise leave this clear. This prevents a
6535 * race in the hw when coming out of rc6: There's a tiny window where
6536 * the hw runs at the minimal clock before selecting the desired
6537 * frequency, if the down threshold expires in that window we will not
6538 * receive a down interrupt. */
6539 if (INTEL_GEN(dev_priv) >= 9) {
6540 limits = (rps->max_freq_softlimit) << 23;
6541 if (val <= rps->min_freq_softlimit)
6542 limits |= (rps->min_freq_softlimit) << 14;
6543 } else {
6544 limits = rps->max_freq_softlimit << 24;
6545 if (val <= rps->min_freq_softlimit)
6546 limits |= rps->min_freq_softlimit << 16;
6547 }
6549 return limits;
6550 }
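/*
 * Example (assumed softlimits): on gen9+, max = 0x3c and min = 0x0c
 * with a request at the floor pack to
 * (0x3c << 23) | (0x0c << 14) = 0x1e030000; pre-gen9 uses the 24/16
 * bit positions instead.
 */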
6552 static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
6553 {
6554 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6555 u32 threshold_up = 0, threshold_down = 0; /* in % */
6556 u32 ei_up = 0, ei_down = 0;
6558 lockdep_assert_held(&rps->power.mutex);
6560 if (new_power == rps->power.mode)
6561 return;
6563 /* Note the units here are not exactly 1us, but 1280ns. */
6564 switch (new_power) {
6565 case LOW_POWER:
6566 /* Upclock if more than 95% busy over 16ms */
6567 ei_up = 16000;
6568 threshold_up = 95;
6570 /* Downclock if less than 85% busy over 32ms */
6571 ei_down = 32000;
6572 threshold_down = 85;
6573 break;
6575 case BETWEEN:
6576 /* Upclock if more than 90% busy over 13ms */
6577 ei_up = 13000;
6578 threshold_up = 90;
6580 /* Downclock if less than 75% busy over 32ms */
6581 ei_down = 32000;
6582 threshold_down = 75;
6583 break;
6585 case HIGH_POWER:
6586 /* Upclock if more than 85% busy over 10ms */
6587 ei_up = 10000;
6588 threshold_up = 85;
6590 /* Downclock if less than 60% busy over 32ms */
6591 ei_down = 32000;
6592 threshold_down = 60;
6593 break;
6594 }
6596 /* Once byt can survive dynamic sw freq adjustments without
6597 * hanging the system, this restriction can be lifted.
6598 */
6599 if (IS_VALLEYVIEW(dev_priv))
6600 goto skip_hw_write;
6602 I915_WRITE(GEN6_RP_UP_EI,
6603 GT_INTERVAL_FROM_US(dev_priv, ei_up));
6604 I915_WRITE(GEN6_RP_UP_THRESHOLD,
6605 GT_INTERVAL_FROM_US(dev_priv,
6606 ei_up * threshold_up / 100));
6608 I915_WRITE(GEN6_RP_DOWN_EI,
6609 GT_INTERVAL_FROM_US(dev_priv, ei_down));
6610 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
6611 GT_INTERVAL_FROM_US(dev_priv,
6612 ei_down * threshold_down / 100));
6614 I915_WRITE(GEN6_RP_CONTROL,
6615 (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
6616 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6617 GEN6_RP_MEDIA_IS_GFX |
6618 GEN6_RP_ENABLE |
6619 GEN6_RP_UP_BUSY_AVG |
6620 GEN6_RP_DOWN_IDLE_AVG);
6622 skip_hw_write:
6623 rps->power.mode = new_power;
6624 rps->power.up_threshold = threshold_up;
6625 rps->power.down_threshold = threshold_down;
6626 }
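/*
 * Example of the 1280ns unit noted above: the LOW_POWER up interval of
 * 16000us becomes 16000 / 1.28 = 12500 hardware units, and its 95%
 * threshold (16000 * 95 / 100 = 15200us) becomes 15200 / 1.28 = 11875
 * units, assuming a platform where GT_INTERVAL_FROM_US() divides by
 * 1.28us.
 */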
6628 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
6629 {
6630 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6631 int new_power;
6633 new_power = rps->power.mode;
6634 switch (rps->power.mode) {
6635 case LOW_POWER:
6636 if (val > rps->efficient_freq + 1 &&
6637 val > rps->cur_freq)
6638 new_power = BETWEEN;
6639 break;
6641 case BETWEEN:
6642 if (val <= rps->efficient_freq &&
6643 val < rps->cur_freq)
6644 new_power = LOW_POWER;
6645 else if (val >= rps->rp0_freq &&
6646 val > rps->cur_freq)
6647 new_power = HIGH_POWER;
6648 break;
6650 case HIGH_POWER:
6651 if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
6652 val < rps->cur_freq)
6653 new_power = BETWEEN;
6654 break;
6655 }
6656 /* Max/min bins are special */
6657 if (val <= rps->min_freq_softlimit)
6658 new_power = LOW_POWER;
6659 if (val >= rps->max_freq_softlimit)
6660 new_power = HIGH_POWER;
6662 mutex_lock(&rps->power.mutex);
6663 if (rps->power.interactive)
6664 new_power = HIGH_POWER;
6665 rps_set_power(dev_priv, new_power);
6666 mutex_unlock(&rps->power.mutex);
6667 }
6669 void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
6670 {
6671 struct intel_rps *rps = &i915->gt_pm.rps;
6673 if (INTEL_GEN(i915) < 6)
6674 return;
6676 mutex_lock(&rps->power.mutex);
6677 if (interactive) {
6678 if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
6679 rps_set_power(i915, HIGH_POWER);
6680 } else {
6681 GEM_BUG_ON(!rps->power.interactive);
6682 rps->power.interactive--;
6683 }
6684 mutex_unlock(&rps->power.mutex);
6685 }
6687 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
6688 {
6689 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6690 u32 mask = 0;
6692 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
6693 if (val > rps->min_freq_softlimit)
6694 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
6695 if (val < rps->max_freq_softlimit)
6696 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
6698 mask &= dev_priv->pm_rps_events;
6700 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
6701 }
6703 /* gen6_set_rps is called to update the frequency request, but should also be
6704 * called when the range (min_delay and max_delay) is modified so that we can
6705 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
6706 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
6707 {
6708 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6710 /* min/max delay may still have been modified so be sure to
6711 * write the limits value.
6712 */
6713 if (val != rps->cur_freq) {
6714 gen6_set_rps_thresholds(dev_priv, val);
6716 if (INTEL_GEN(dev_priv) >= 9)
6717 I915_WRITE(GEN6_RPNSWREQ,
6718 GEN9_FREQUENCY(val));
6719 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6720 I915_WRITE(GEN6_RPNSWREQ,
6721 HSW_FREQUENCY(val));
6723 I915_WRITE(GEN6_RPNSWREQ,
6724 GEN6_FREQUENCY(val) |
6726 GEN6_AGGRESSIVE_TURBO);
6727 }
6729 /* Make sure we continue to get interrupts
6730 * until we hit the minimum or maximum frequencies.
6731 */
6732 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
6733 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
6735 rps->cur_freq = val;
6736 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
6738 return 0;
6739 }
6741 static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
6742 {
6743 int err;
6745 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
6746 "Odd GPU freq value\n"))
6747 val &= ~1;
6749 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
6751 if (val != dev_priv->gt_pm.rps.cur_freq) {
6752 vlv_punit_get(dev_priv);
6753 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
6754 vlv_punit_put(dev_priv);
6755 if (err)
6756 return err;
6758 gen6_set_rps_thresholds(dev_priv, val);
6759 }
6761 dev_priv->gt_pm.rps.cur_freq = val;
6762 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
6764 return 0;
6765 }
6767 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
6769 * * If Gfx is Idle, then
6770 * 1. Forcewake Media well.
6771 * 2. Request idle freq.
6772 * 3. Release Forcewake of Media well.
6773 */
6774 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
6775 {
6776 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6777 u32 val = rps->idle_freq;
6778 int err;
6780 if (rps->cur_freq <= val)
6781 return;
6783 /* The punit delays the write of the frequency and voltage until it
6784 * determines the GPU is awake. During normal usage we don't want to
6785 * waste power changing the frequency if the GPU is sleeping (rc6).
6786 * However, the GPU and driver is now idle and we do not want to delay
6787 * switching to minimum voltage (reducing power whilst idle) as we do
6788 * not expect to be woken in the near future and so must flush the
6789 * change by waking the device.
6791 * We choose to take the media powerwell (either would do to trick the
6792 * punit into committing the voltage change) as that takes a lot less
6793 * power than the render powerwell.
6794 */
6795 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
6796 err = valleyview_set_rps(dev_priv, val);
6797 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
6799 if (err)
6800 DRM_ERROR("Failed to set RPS for idle\n");
6801 }
6803 void gen6_rps_busy(struct drm_i915_private *dev_priv)
6804 {
6805 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6807 mutex_lock(&rps->lock);
6808 if (rps->enabled) {
6809 u8 freq;
6811 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
6812 gen6_rps_reset_ei(dev_priv);
6813 I915_WRITE(GEN6_PMINTRMSK,
6814 gen6_rps_pm_mask(dev_priv, rps->cur_freq));
6816 gen6_enable_rps_interrupts(dev_priv);
6818 /* Use the user's desired frequency as a guide, but for better
6819 * performance, jump directly to RPe as our starting frequency.
6820 */
6821 freq = max(rps->cur_freq,
6822 rps->efficient_freq);
6824 if (intel_set_rps(dev_priv,
6825 clamp(freq,
6826 rps->min_freq_softlimit,
6827 rps->max_freq_softlimit)))
6828 DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
6829 }
6830 mutex_unlock(&rps->lock);
6831 }
6833 void gen6_rps_idle(struct drm_i915_private *dev_priv)
6834 {
6835 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6837 /* Flush our bottom-half so that it does not race with us
6838 * setting the idle frequency and so that it is bounded by
6839 * our rpm wakeref. And then disable the interrupts to stop any
6840 * further RPS reclocking whilst we are asleep.
6841 */
6842 gen6_disable_rps_interrupts(dev_priv);
6844 mutex_lock(&rps->lock);
6846 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6847 vlv_set_rps_idle(dev_priv);
6848 else
6849 gen6_set_rps(dev_priv, rps->idle_freq);
6851 I915_WRITE(GEN6_PMINTRMSK,
6852 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
6854 mutex_unlock(&rps->lock);
6855 }
6857 void gen6_rps_boost(struct i915_request *rq)
6858 {
6859 struct intel_rps *rps = &rq->i915->gt_pm.rps;
6860 unsigned long flags;
6861 bool boost;
6863 /* This is intentionally racy! We peek at the state here, then
6864 * validate inside the RPS worker.
6865 */
6866 if (!rps->enabled)
6867 return;
6869 if (i915_request_signaled(rq))
6870 return;
6872 /* Serializes with i915_request_retire() */
6873 boost = false;
6874 spin_lock_irqsave(&rq->lock, flags);
6875 if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
6876 boost = !atomic_fetch_inc(&rps->num_waiters);
6877 rq->waitboost = true;
6878 }
6879 spin_unlock_irqrestore(&rq->lock, flags);
6880 if (!boost)
6881 return;
6883 if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
6884 schedule_work(&rps->work);
6886 atomic_inc(&rps->boosts);
6887 }
6889 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
6890 {
6891 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6892 int err;
6894 lockdep_assert_held(&rps->lock);
6895 GEM_BUG_ON(val > rps->max_freq);
6896 GEM_BUG_ON(val < rps->min_freq);
6898 if (!rps->enabled) {
6899 rps->cur_freq = val;
6900 return 0;
6901 }
6903 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6904 err = valleyview_set_rps(dev_priv, val);
6905 else
6906 err = gen6_set_rps(dev_priv, val);
6908 return err;
6909 }
6911 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
6912 {
6913 I915_WRITE(GEN6_RC_CONTROL, 0);
6914 I915_WRITE(GEN9_PG_ENABLE, 0);
6915 }
6917 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
6918 {
6919 I915_WRITE(GEN6_RP_CONTROL, 0);
6920 }
6922 static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
6923 {
6924 I915_WRITE(GEN6_RC_CONTROL, 0);
6925 }
6927 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
6928 {
6929 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
6930 I915_WRITE(GEN6_RP_CONTROL, 0);
6931 }
6933 static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
6934 {
6935 I915_WRITE(GEN6_RC_CONTROL, 0);
6936 }
6938 static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
6939 {
6940 I915_WRITE(GEN6_RP_CONTROL, 0);
6941 }
6943 static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
6944 {
6945 /* We're doing forcewake before disabling RC6,
6946 * this is what the BIOS expects when going into suspend */
6947 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
6949 I915_WRITE(GEN6_RC_CONTROL, 0);
6951 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
6952 }
6954 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
6955 {
6956 I915_WRITE(GEN6_RP_CONTROL, 0);
6957 }
6959 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
6960 {
6961 bool enable_rc6 = true;
6962 unsigned long rc6_ctx_base;
6963 u32 rc_ctl;
6964 int rc_sw_target;
6966 rc_ctl = I915_READ(GEN6_RC_CONTROL);
6967 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
6968 RC_SW_TARGET_STATE_SHIFT;
6969 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
6970 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
6971 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
6972 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
6973 rc_sw_target);
6975 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
6976 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
6977 enable_rc6 = false;
6978 }
6980 /*
6981 * The exact context size is not known for BXT, so assume a page size
6982 * for this check.
6983 */
6984 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
6985 if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
6986 (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
6987 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
6991 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
6992 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
6993 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
6994 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
6995 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
6999 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
7000 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
7001 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
7002 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
7006 if (!I915_READ(GEN6_GFXPAUSE)) {
7007 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
7011 if (!I915_READ(GEN8_MISC_CTRL0)) {
7012 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
7019 static bool sanitize_rc6(struct drm_i915_private *i915)
7021 struct intel_device_info *info = mkwrite_device_info(i915);
7023 /* Powersaving is controlled by the host when inside a VM */
7024 if (intel_vgpu_active(i915)) {
7025 info->has_rc6 = 0;
7026 info->has_rps = false;
7027 }
7029 if (info->has_rc6 &&
7030 IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
7031 DRM_INFO("RC6 disabled by BIOS\n");
7036 * We assume that we do not have any deep rc6 levels if we don't have
7037 * have the previous rc6 level supported, i.e. we use HAS_RC6()
7038 * as the initial coarse check for rc6 in general, moving on to
7039 * progressively finer/deeper levels.
7041 if (!info->has_rc6 && info->has_rc6p)
7044 return info->has_rc6;
7047 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
7048 {
7049 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7051 /* All of these values are in units of 50MHz */
7053 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
7054 if (IS_GEN9_LP(dev_priv)) {
7055 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
7056 rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
7057 rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
7058 rps->min_freq = (rp_state_cap >> 0) & 0xff;
7059 } else {
7060 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
7061 rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
7062 rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
7063 rps->min_freq = (rp_state_cap >> 16) & 0xff;
7064 }
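/*
 * Example (assumed register value): a GEN6_RP_STATE_CAP of 0x00050b16
 * decodes as rp0 = 0x16 (22, i.e. 1100MHz in 50MHz units), rp1 = 0x0b
 * (550MHz) and min = 0x05 (250MHz); BXT packs the same fields in the
 * reverse byte order, as handled above.
 */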
7065 /* hw_max = RP0 until we check for overclocking */
7066 rps->max_freq = rps->rp0_freq;
7068 rps->efficient_freq = rps->rp1_freq;
7069 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
7070 IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
7071 u32 ddcc_status = 0;
7073 if (sandybridge_pcode_read(dev_priv,
7074 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
7075 &ddcc_status) == 0)
7076 rps->efficient_freq =
7077 clamp_t(u8,
7078 ((ddcc_status >> 8) & 0xff),
7079 rps->min_freq,
7080 rps->max_freq);
7081 }
7083 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
7084 /* Store the frequency values in 16.66 MHZ units, which is
7085 * the natural hardware unit for SKL
7086 */
7087 rps->rp0_freq *= GEN9_FREQ_SCALER;
7088 rps->rp1_freq *= GEN9_FREQ_SCALER;
7089 rps->min_freq *= GEN9_FREQ_SCALER;
7090 rps->max_freq *= GEN9_FREQ_SCALER;
7091 rps->efficient_freq *= GEN9_FREQ_SCALER;
7092 }
7093 }
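/*
 * Example: GEN9_FREQ_SCALER is 3, so an RP0 fuse value of 22
 * (22 * 50MHz = 1100MHz) is stored as 66 units of 16.66MHz, which is
 * still ~1100MHz.
 */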
7095 static void reset_rps(struct drm_i915_private *dev_priv,
7096 int (*set)(struct drm_i915_private *, u8))
7097 {
7098 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7099 u8 freq = rps->cur_freq;
7101 /* force a reset */
7102 rps->power.mode = -1;
7103 rps->cur_freq = -1;
7105 if (set(dev_priv, freq))
7106 DRM_ERROR("Failed to reset RPS to initial values\n");
7107 }
7109 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
7110 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
7111 {
7112 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7114 /* Program defaults and thresholds for RPS */
7115 if (IS_GEN(dev_priv, 9))
7116 I915_WRITE(GEN6_RC_VIDEO_FREQ,
7117 GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
7119 /* 1 second timeout */
7120 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
7121 GT_INTERVAL_FROM_US(dev_priv, 1000000));
7123 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
7125 /* Leaning on the below call to gen6_set_rps to program/setup the
7126 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
7127 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
7128 reset_rps(dev_priv, gen6_set_rps);
7130 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7131 }
7133 static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
7134 {
7135 struct intel_engine_cs *engine;
7136 enum intel_engine_id id;
7138 /* 1a: Software RC state - RC0 */
7139 I915_WRITE(GEN6_RC_STATE, 0);
7142 * 1b: Get forcewake during program sequence. Although the driver
7143 * hasn't enabled a state yet where we need forcewake, BIOS may have.
7145 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7147 /* 2a: Disable RC states. */
7148 I915_WRITE(GEN6_RC_CONTROL, 0);
7150 /* 2b: Program RC6 thresholds.*/
7151 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
7152 I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
7154 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
7155 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
7156 for_each_engine(engine, dev_priv, id)
7157 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7159 if (HAS_GUC(dev_priv))
7160 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
7162 I915_WRITE(GEN6_RC_SLEEP, 0);
7164 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
7166 /*
7167 * 2c: Program Coarse Power Gating Policies.
7169 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
7170 * use instead is a more conservative estimate for the maximum time
7171 * it takes us to service a CS interrupt and submit a new ELSP - that
7172 * is the time which the GPU is idle waiting for the CPU to select the
7173 * next request to execute. If the idle hysteresis is less than that
7174 * interrupt service latency, the hardware will automatically gate
7175 * the power well and we will then incur the wake up cost on top of
7176 * the service latency. A similar guide from intel_pstate is that we
7177 * do not want the enable hysteresis to be less than the wakeup latency.
7179 * igt/gem_exec_nop/sequential provides a rough estimate for the
7180 * service latency, and puts it around 10us for Broadwell (and other
7181 * big core) and around 40us for Broxton (and other low power cores).
7182 * [Note that for legacy ringbuffer submission, this is less than 1us!]
7183 * However, the wakeup latency on Broxton is closer to 100us. To be
7184 * conservative, we have to factor in a context switch on top (due
7185 * to ksoftirqd).
7186 */
7187 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
7188 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
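/*
 * With the 1280ns unit noted above, the value 250 corresponds to
 * 250 * 1.28us = 320us of idle hysteresis, comfortably above the
 * ~100us Broxton wakeup latency plus a context switch.
 */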
7190 /* 3a: Enable RC6 */
7191 I915_WRITE(GEN6_RC_CONTROL,
7192 GEN6_RC_CTL_HW_ENABLE |
7193 GEN6_RC_CTL_RC6_ENABLE |
7194 GEN6_RC_CTL_EI_MODE(1));
7196 /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
7197 I915_WRITE(GEN9_PG_ENABLE,
7198 GEN9_RENDER_PG_ENABLE |
7199 GEN9_MEDIA_PG_ENABLE |
7200 GEN11_MEDIA_SAMPLER_PG_ENABLE);
7202 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7203 }
7205 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
7206 {
7207 struct intel_engine_cs *engine;
7208 enum intel_engine_id id;
7209 u32 rc6_mode;
7211 /* 1a: Software RC state - RC0 */
7212 I915_WRITE(GEN6_RC_STATE, 0);
7214 /* 1b: Get forcewake during program sequence. Although the driver
7215 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
7216 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7218 /* 2a: Disable RC states. */
7219 I915_WRITE(GEN6_RC_CONTROL, 0);
7221 /* 2b: Program RC6 thresholds.*/
7222 if (INTEL_GEN(dev_priv) >= 10) {
7223 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
7224 I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
7225 } else if (IS_SKYLAKE(dev_priv)) {
7227 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
7228 * when CPG is enabled
7230 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
7231 } else {
7232 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
7233 }
7235 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
7236 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
7237 for_each_engine(engine, dev_priv, id)
7238 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7240 if (HAS_GUC(dev_priv))
7241 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
7243 I915_WRITE(GEN6_RC_SLEEP, 0);
7245 /*
7246 * 2c: Program Coarse Power Gating Policies.
7248 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
7249 * use instead is a more conservative estimate for the maximum time
7250 * it takes us to service a CS interrupt and submit a new ELSP - that
7251 * is the time which the GPU is idle waiting for the CPU to select the
7252 * next request to execute. If the idle hysteresis is less than that
7253 * interrupt service latency, the hardware will automatically gate
7254 * the power well and we will then incur the wake up cost on top of
7255 * the service latency. A similar guide from intel_pstate is that we
7256 * do not want the enable hysteresis to be less than the wakeup latency.
7258 * igt/gem_exec_nop/sequential provides a rough estimate for the
7259 * service latency, and puts it around 10us for Broadwell (and other
7260 * big core) and around 40us for Broxton (and other low power cores).
7261 * [Note that for legacy ringbuffer submission, this is less than 1us!]
7262 * However, the wakeup latency on Broxton is closer to 100us. To be
7263 * conservative, we have to factor in a context switch on top (due
7264 * to ksoftirqd).
7265 */
7266 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
7267 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
7269 /* 3a: Enable RC6 */
7270 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
7272 /* WaRsUseTimeoutMode:cnl (pre-prod) */
7273 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
7274 rc6_mode = GEN7_RC_CTL_TO_MODE;
7275 else
7276 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
7278 I915_WRITE(GEN6_RC_CONTROL,
7279 GEN6_RC_CTL_HW_ENABLE |
7280 GEN6_RC_CTL_RC6_ENABLE |
7281 rc6_mode);
7283 /*
7284 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
7285 * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
7286 */
7287 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
7288 I915_WRITE(GEN9_PG_ENABLE, 0);
7289 else
7290 I915_WRITE(GEN9_PG_ENABLE,
7291 GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
7293 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7294 }
7296 static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
7297 {
7298 struct intel_engine_cs *engine;
7299 enum intel_engine_id id;
7301 /* 1a: Software RC state - RC0 */
7302 I915_WRITE(GEN6_RC_STATE, 0);
7304 /* 1b: Get forcewake during program sequence. Although the driver
7305 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
7306 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7308 /* 2a: Disable RC states. */
7309 I915_WRITE(GEN6_RC_CONTROL, 0);
7311 /* 2b: Program RC6 thresholds.*/
7312 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
7313 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
7314 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
7315 for_each_engine(engine, dev_priv, id)
7316 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7317 I915_WRITE(GEN6_RC_SLEEP, 0);
7318 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
7320 /* 3: Enable RC6 */
7322 I915_WRITE(GEN6_RC_CONTROL,
7323 GEN6_RC_CTL_HW_ENABLE |
7324 GEN7_RC_CTL_TO_MODE |
7325 GEN6_RC_CTL_RC6_ENABLE);
7327 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7328 }
7330 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
7331 {
7332 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7334 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7336 /* 1 Program defaults and thresholds for RPS*/
7337 I915_WRITE(GEN6_RPNSWREQ,
7338 HSW_FREQUENCY(rps->rp1_freq));
7339 I915_WRITE(GEN6_RC_VIDEO_FREQ,
7340 HSW_FREQUENCY(rps->rp1_freq));
7341 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
7342 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
7344 /* Docs recommend 900MHz, and 300 MHz respectively */
7345 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
7346 rps->max_freq_softlimit << 24 |
7347 rps->min_freq_softlimit << 16);
7349 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
7350 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
7351 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
7352 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
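/*
 * The divisors above convert 10ns ticks into the 1.28us RPS unit:
 * 76ms is 7600000 ticks of 10ns, and 7600000 / 128 = 59375 units of
 * 1.28us, which is 76ms again (1.28us = 128 * 10ns).
 */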
7354 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7356 /* 2: Enable RPS */
7357 I915_WRITE(GEN6_RP_CONTROL,
7358 GEN6_RP_MEDIA_TURBO |
7359 GEN6_RP_MEDIA_HW_NORMAL_MODE |
7360 GEN6_RP_MEDIA_IS_GFX |
7361 GEN6_RP_ENABLE |
7362 GEN6_RP_UP_BUSY_AVG |
7363 GEN6_RP_DOWN_IDLE_AVG);
7365 reset_rps(dev_priv, gen6_set_rps);
7367 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7368 }
7370 static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
7371 {
7372 struct intel_engine_cs *engine;
7373 enum intel_engine_id id;
7374 u32 rc6vids, rc6_mask;
7375 u32 gtfifodbg;
7376 int ret;
7378 I915_WRITE(GEN6_RC_STATE, 0);
7380 /* Clear the DBG now so we don't confuse earlier errors */
7381 gtfifodbg = I915_READ(GTFIFODBG);
7382 if (gtfifodbg) {
7383 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
7384 I915_WRITE(GTFIFODBG, gtfifodbg);
7385 }
7387 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7389 /* disable the counters and set deterministic thresholds */
7390 I915_WRITE(GEN6_RC_CONTROL, 0);
7392 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
7393 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
7394 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
7395 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
7396 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
7398 for_each_engine(engine, dev_priv, id)
7399 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7401 I915_WRITE(GEN6_RC_SLEEP, 0);
7402 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
7403 if (IS_IVYBRIDGE(dev_priv))
7404 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
7406 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
7407 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
7408 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
7410 /* We don't use those on Haswell */
7411 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
7412 if (HAS_RC6p(dev_priv))
7413 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
7414 if (HAS_RC6pp(dev_priv))
7415 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
7416 I915_WRITE(GEN6_RC_CONTROL,
7417 rc6_mask |
7418 GEN6_RC_CTL_EI_MODE(1) |
7419 GEN6_RC_CTL_HW_ENABLE);
7422 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
7423 if (IS_GEN(dev_priv, 6) && ret) {
7424 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
7425 } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
7426 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
7427 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
7428 rc6vids &= 0xffff00;
7429 rc6vids |= GEN6_ENCODE_RC6_VID(450);
7430 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
7431 if (ret)
7432 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
7433 }
7435 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7436 }
7438 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
7439 {
7440 /* Here begins a magic sequence of register writes to enable
7441 * auto-downclocking.
7443 * Perhaps there might be some value in exposing these to
7444 * userspace...
7445 */
7446 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7448 /* Power down if completely idle for over 50ms */
7449 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
7450 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7452 reset_rps(dev_priv, gen6_set_rps);
7454 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7455 }
7457 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
7458 {
7459 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7460 const int min_freq = 15;
7461 const int scaling_factor = 180;
7462 unsigned int gpu_freq;
7463 unsigned int max_ia_freq, min_ring_freq;
7464 unsigned int max_gpu_freq, min_gpu_freq;
7465 struct cpufreq_policy *policy;
7467 lockdep_assert_held(&rps->lock);
7469 if (rps->max_freq <= rps->min_freq)
7470 return;
7472 policy = cpufreq_cpu_get(0);
7473 if (policy) {
7474 max_ia_freq = policy->cpuinfo.max_freq;
7475 cpufreq_cpu_put(policy);
7476 } else {
7477 /*
7478 * Default to measured freq if none found, PCU will ensure we
7479 * don't go over
7480 */
7481 max_ia_freq = tsc_khz;
7482 }
7484 /* Convert from kHz to MHz */
7485 max_ia_freq /= 1000;
7487 min_ring_freq = I915_READ(DCLK) & 0xf;
7488 /* convert DDR frequency from units of 266.6MHz to bandwidth */
7489 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
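/* Example: a DCLK field of 3 is 3 * 266.6MHz = ~800MHz of DDR
 * bandwidth; mult_frac(3, 8, 3) = 8 expresses the same 800MHz in the
 * 100MHz units used for ring_freq below.
 */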
7491 min_gpu_freq = rps->min_freq;
7492 max_gpu_freq = rps->max_freq;
7493 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
7494 /* Convert GT frequency to 50 HZ units */
7495 min_gpu_freq /= GEN9_FREQ_SCALER;
7496 max_gpu_freq /= GEN9_FREQ_SCALER;
7497 }
7499 /*
7500 * For each potential GPU frequency, load a ring frequency we'd like
7501 * to use for memory access. We do this by specifying the IA frequency
7502 * the PCU should use as a reference to determine the ring frequency.
7503 */
7504 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
7505 const int diff = max_gpu_freq - gpu_freq;
7506 unsigned int ia_freq = 0, ring_freq = 0;
7508 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
7509 /*
7510 * ring_freq = 2 * GT. ring_freq is in 100MHz units
7511 * No floor required for ring frequency on SKL.
7512 */
7513 ring_freq = gpu_freq;
7514 } else if (INTEL_GEN(dev_priv) >= 8) {
7515 /* max(2 * GT, DDR). NB: GT is 50MHz units */
7516 ring_freq = max(min_ring_freq, gpu_freq);
7517 } else if (IS_HASWELL(dev_priv)) {
7518 ring_freq = mult_frac(gpu_freq, 5, 4);
7519 ring_freq = max(min_ring_freq, ring_freq);
7520 /* leave ia_freq as the default, chosen by cpufreq */
7521 } else {
7522 /* On older processors, there is no separate ring
7523 * clock domain, so in order to boost the bandwidth
7524 * of the ring, we need to upclock the CPU (ia_freq).
7526 * For GPU frequencies less than 750MHz,
7527 * just use the lowest ring freq.
7528 */
7529 if (gpu_freq < min_freq)
7530 ia_freq = 800;
7531 else
7532 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
7533 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
7534 }
7536 sandybridge_pcode_write(dev_priv,
7537 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
7538 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
7539 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
7540 gpu_freq);
7541 }
7542 }
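/*
 * Example (assumed values): a Haswell iteration with gpu_freq = 20
 * (20 * 50MHz = 1000MHz) requests ring_freq = mult_frac(20, 5, 4) = 25
 * (1250MHz), floored at min_ring_freq, while ia_freq stays 0 so that
 * cpufreq keeps control of the CPU frequency.
 */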
7544 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
7545 {
7546 u32 val, rp0;
7548 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
7550 switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
7551 case 8:
7552 /* (2 * 4) config */
7553 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
7554 break;
7555 case 12:
7556 /* (2 * 6) config */
7557 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
7558 break;
7559 case 16:
7560 /* (2 * 8) config */
7561 default:
7562 /* Setting (2 * 8) Min RP0 for any other combination */
7563 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
7564 break;
7565 }
7567 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
7569 return rp0;
7570 }
7572 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
7573 {
7574 u32 val, rpe;
7576 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
7577 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
7579 return rpe;
7580 }
7582 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
7583 {
7584 u32 val, rp1;
7586 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
7587 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
7589 return rp1;
7590 }
7592 static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
7593 {
7594 u32 val, rpn;
7596 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
7597 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
7598 FB_GFX_FREQ_FUSE_MASK);
7600 return rpn;
7601 }
7603 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
7604 {
7605 u32 val, rp1;
7607 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
7609 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
7611 return rp1;
7612 }
7614 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
7615 {
7616 u32 val, rp0;
7618 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
7620 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
7621 /* Clamp to max */
7622 rp0 = min_t(u32, rp0, 0xea);
7624 return rp0;
7625 }
7627 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
7628 {
7629 u32 val, rpe;
7631 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
7632 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
7633 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
7634 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
7636 return rpe;
7637 }
7639 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
7640 {
7641 u32 val;
7643 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
7644 /*
7645 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
7646 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
7647 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
7648 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
7649 * to make sure it matches what Punit accepts.
7650 */
7651 return max_t(u32, val, 0xc0);
7652 }
7654 /* Check that the pctx buffer wasn't moved under us. */
7655 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
7656 {
7657 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
7659 WARN_ON(pctx_addr != dev_priv->dsm.start +
7660 dev_priv->vlv_pctx->stolen->start);
7661 }
7664 /* Check that the pcbr address is not empty. */
7665 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
7666 {
7667 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
7669 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
7670 }
7672 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
7673 {
7674 resource_size_t pctx_paddr, paddr;
7675 resource_size_t pctx_size = 32*1024;
7676 u32 pcbr;
7678 pcbr = I915_READ(VLV_PCBR);
7679 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
7680 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
7681 paddr = dev_priv->dsm.end + 1 - pctx_size;
7682 GEM_BUG_ON(paddr > U32_MAX);
7684 pctx_paddr = (paddr & (~4095));
7685 I915_WRITE(VLV_PCBR, pctx_paddr);
7686 }
7688 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
7689 }
7691 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
7692 {
7693 struct drm_i915_gem_object *pctx;
7694 resource_size_t pctx_paddr;
7695 resource_size_t pctx_size = 24*1024;
7696 u32 pcbr;
7698 pcbr = I915_READ(VLV_PCBR);
7699 if (pcbr) {
7700 /* BIOS set it up already, grab the pre-alloc'd space */
7701 resource_size_t pcbr_offset;
7703 pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
7704 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
7705 pcbr_offset,
7706 I915_GTT_OFFSET_NONE,
7707 pctx_size);
7708 goto out;
7709 }
7711 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
7713 /*
7714 * From the Gunit register HAS:
7715 * The Gfx driver is expected to program this register and ensure
7716 * proper allocation within Gfx stolen memory. For example, this
7717 * register should be programmed such than the PCBR range does not
7718 * overlap with other ranges, such as the frame buffer, protected
7719 * memory, or any other relevant ranges.
7720 */
7721 pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
7722 if (!pctx) {
7723 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
7724 goto out;
7725 }
7727 GEM_BUG_ON(range_overflows_t(u64,
7728 dev_priv->dsm.start,
7729 pctx->stolen->start,
7730 U32_MAX));
7731 pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
7732 I915_WRITE(VLV_PCBR, pctx_paddr);
7734 out:
7735 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
7736 dev_priv->vlv_pctx = pctx;
7737 }
7739 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
7740 {
7741 struct drm_i915_gem_object *pctx;
7743 pctx = fetch_and_zero(&dev_priv->vlv_pctx);
7744 if (pctx)
7745 i915_gem_object_put(pctx);
7746 }
7748 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
7749 {
7750 dev_priv->gt_pm.rps.gpll_ref_freq =
7751 vlv_get_cck_clock(dev_priv, "GPLL ref",
7752 CCK_GPLL_CLOCK_CONTROL,
7753 dev_priv->czclk_freq);
7755 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
7756 dev_priv->gt_pm.rps.gpll_ref_freq);
7757 }
7759 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
7760 {
7761 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7762 u32 val;
7764 valleyview_setup_pctx(dev_priv);
7766 vlv_iosf_sb_get(dev_priv,
7767 BIT(VLV_IOSF_SB_PUNIT) |
7768 BIT(VLV_IOSF_SB_NC) |
7769 BIT(VLV_IOSF_SB_CCK));
7771 vlv_init_gpll_ref_freq(dev_priv);
7773 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7774 switch ((val >> 6) & 3) {
7775 case 0:
7776 case 1:
7777 dev_priv->mem_freq = 800;
7778 break;
7779 case 2:
7780 dev_priv->mem_freq = 1066;
7781 break;
7782 case 3:
7783 dev_priv->mem_freq = 1333;
7784 break;
7785 }
7786 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7788 rps->max_freq = valleyview_rps_max_freq(dev_priv);
7789 rps->rp0_freq = rps->max_freq;
7790 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7791 intel_gpu_freq(dev_priv, rps->max_freq),
7792 rps->max_freq);
7794 rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
7795 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7796 intel_gpu_freq(dev_priv, rps->efficient_freq),
7797 rps->efficient_freq);
7799 rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
7800 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
7801 intel_gpu_freq(dev_priv, rps->rp1_freq),
7802 rps->rp1_freq);
7804 rps->min_freq = valleyview_rps_min_freq(dev_priv);
7805 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7806 intel_gpu_freq(dev_priv, rps->min_freq),
7807 rps->min_freq);
7809 vlv_iosf_sb_put(dev_priv,
7810 BIT(VLV_IOSF_SB_PUNIT) |
7811 BIT(VLV_IOSF_SB_NC) |
7812 BIT(VLV_IOSF_SB_CCK));
7813 }
7815 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
7816 {
7817 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7818 u32 val;
7820 cherryview_setup_pctx(dev_priv);
7822 vlv_iosf_sb_get(dev_priv,
7823 BIT(VLV_IOSF_SB_PUNIT) |
7824 BIT(VLV_IOSF_SB_NC) |
7825 BIT(VLV_IOSF_SB_CCK));
7827 vlv_init_gpll_ref_freq(dev_priv);
7829 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
7831 switch ((val >> 2) & 0x7) {
7832 case 3:
7833 dev_priv->mem_freq = 2000;
7834 break;
7835 default:
7836 dev_priv->mem_freq = 1600;
7837 break;
7838 }
7839 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7841 rps->max_freq = cherryview_rps_max_freq(dev_priv);
7842 rps->rp0_freq = rps->max_freq;
7843 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7844 intel_gpu_freq(dev_priv, rps->max_freq),
7845 rps->max_freq);
7847 rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
7848 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7849 intel_gpu_freq(dev_priv, rps->efficient_freq),
7850 rps->efficient_freq);
7852 rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
7853 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
7854 intel_gpu_freq(dev_priv, rps->rp1_freq),
7855 rps->rp1_freq);
7857 rps->min_freq = cherryview_rps_min_freq(dev_priv);
7858 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7859 intel_gpu_freq(dev_priv, rps->min_freq),
7860 rps->min_freq);
7862 vlv_iosf_sb_put(dev_priv,
7863 BIT(VLV_IOSF_SB_PUNIT) |
7864 BIT(VLV_IOSF_SB_NC) |
7865 BIT(VLV_IOSF_SB_CCK));
7867 WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
7868 rps->min_freq) & 1,
7869 "Odd GPU freq values\n");
7870 }
7872 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7873 {
7874 valleyview_cleanup_pctx(dev_priv);
7875 }
7877 static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
7878 {
7879 struct intel_engine_cs *engine;
7880 enum intel_engine_id id;
7881 u32 gtfifodbg, rc6_mode, pcbr;
7883 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
7884 GT_FIFO_FREE_ENTRIES_CHV);
7885 if (gtfifodbg) {
7886 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
7887 gtfifodbg);
7888 I915_WRITE(GTFIFODBG, gtfifodbg);
7889 }
7891 cherryview_check_pctx(dev_priv);
7893 /* 1a & 1b: Get forcewake during program sequence. Although the driver
7894 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
7895 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7897 /* Disable RC states. */
7898 I915_WRITE(GEN6_RC_CONTROL, 0);
7900 /* 2a: Program RC6 thresholds.*/
7901 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
7902 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
7903 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
7905 for_each_engine(engine, dev_priv, id)
7906 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7907 I915_WRITE(GEN6_RC_SLEEP, 0);
7909 /* TO threshold set to 500 us (0x186 * 1.28 us) */
7910 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
7912 /* Allows RC6 residency counter to work */
7913 I915_WRITE(VLV_COUNTER_CONTROL,
7914 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
7915 VLV_MEDIA_RC6_COUNT_EN |
7916 VLV_RENDER_RC6_COUNT_EN));
7918 /* For now we assume BIOS is allocating and populating the PCBR */
7919 pcbr = I915_READ(VLV_PCBR);
7921 /* 3: Enable RC6 */
7922 rc6_mode = 0;
7923 if (pcbr >> VLV_PCBR_ADDR_SHIFT)
7924 rc6_mode = GEN7_RC_CTL_TO_MODE;
7925 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
7927 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7928 }
7930 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
7931 {
7932 u32 val;
7934 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7936 /* 1: Program defaults and thresholds for RPS*/
7937 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
7938 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
7939 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
7940 I915_WRITE(GEN6_RP_UP_EI, 66000);
7941 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
7943 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7945 /* 2: Enable RPS */
7946 I915_WRITE(GEN6_RP_CONTROL,
7947 GEN6_RP_MEDIA_HW_NORMAL_MODE |
7948 GEN6_RP_MEDIA_IS_GFX |
7949 GEN6_RP_ENABLE |
7950 GEN6_RP_UP_BUSY_AVG |
7951 GEN6_RP_DOWN_IDLE_AVG);
7953 /* Setting Fixed Bias */
7954 vlv_punit_get(dev_priv);
7956 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
7957 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
7959 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7961 vlv_punit_put(dev_priv);
7963 /* RPS code assumes GPLL is used */
7964 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
7966 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
7967 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
7969 reset_rps(dev_priv, valleyview_set_rps);
7971 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7972 }
7974 static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
7975 {
7976 struct intel_engine_cs *engine;
7977 enum intel_engine_id id;
7978 u32 gtfifodbg;
7980 valleyview_check_pctx(dev_priv);
7982 gtfifodbg = I915_READ(GTFIFODBG);
7983 if (gtfifodbg) {
7984 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
7985 gtfifodbg);
7986 I915_WRITE(GTFIFODBG, gtfifodbg);
7987 }
7989 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7991 /* Disable RC states. */
7992 I915_WRITE(GEN6_RC_CONTROL, 0);
7994 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
7995 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
7996 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
7998 for_each_engine(engine, dev_priv, id)
7999 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
8001 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
8003 /* Allows RC6 residency counter to work */
8004 I915_WRITE(VLV_COUNTER_CONTROL,
8005 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
8006 VLV_MEDIA_RC0_COUNT_EN |
8007 VLV_RENDER_RC0_COUNT_EN |
8008 VLV_MEDIA_RC6_COUNT_EN |
8009 VLV_RENDER_RC6_COUNT_EN));
8011 I915_WRITE(GEN6_RC_CONTROL,
8012 GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
8014 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	vlv_punit_get(dev_priv);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(dev_priv);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1 << post) * pre));

	return freq;
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

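/*
 * Column key for the table above: the first two fields are matched against
 * dev_priv->ips.c_m and dev_priv->ips.r_t in __i915_chipset_val() below;
 * the last two are the slope/intercept of the empirical power fit,
 * ret = (m * count_rate + c) / 10.
 */
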
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	lockdep_assert_held(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

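/*
 * Worked example for the function above (made-up numbers): with m = 301,
 * c = 28664 and the three energy counters advancing by a combined 120000
 * over a 100ms window, count_rate = 120000 / 100 = 1200 and so
 * ret = (301 * 1200 + 28664) / 10 = 38986.
 */
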
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;
	unsigned long val = 0;

	if (!IS_GEN(dev_priv, 5))
		return 0;

	with_intel_runtime_pm(dev_priv, wakeref) {
		spin_lock_irq(&mchdev_lock);
		val = __i915_chipset_val(dev_priv);
		spin_unlock_irq(&mchdev_lock);
	}

	return val;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}

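/*
 * Worked example: pxvid 0x2f (47) gives vd = (47 + 2) * 125 = 6125 and
 * vm = 6125 - 1125 = 5000, so a mobile part reports 5000 while a desktop
 * part reports the raw 6125 (in whatever units the EMON tables expect).
 */
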
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	if (!IS_GEN(dev_priv, 5))
		return;

	with_intel_runtime_pm(dev_priv, wakeref) {
		spin_lock_irq(&mchdev_lock);
		__i915_update_gfx_val(dev_priv);
		spin_unlock_irq(&mchdev_lock);
	}
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;
	unsigned long val = 0;

	if (!IS_GEN(dev_priv, 5))
		return 0;

	with_intel_runtime_pm(dev_priv, wakeref) {
		spin_lock_irq(&mchdev_lock);
		val = __i915_gfx_val(dev_priv);
		spin_unlock_irq(&mchdev_lock);
	}

	return val;
}

static struct drm_i915_private __rcu *i915_mch_dev;

static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(i915_mch_dev);
	/* The pointer may be NULL until intel_gpu_ips_init() has run. */
	if (!i915 || !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(i915, wakeref) {
		spin_lock_irq(&mchdev_lock);
		chipset_val = __i915_chipset_val(i915);
		graphics_val = __i915_gfx_val(i915);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

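/*
 * Illustrative sketch of the consumer side, roughly how intel_ips might use
 * the exports in this block (hypothetical snippet and threshold name, not
 * part of this file):
 *
 *	unsigned long budget = i915_read_mch_val();
 *
 *	if (budget > mcp_limit)		// mcp_limit: hypothetical cap
 *		i915_gpu_lower();	// shave the GPU frequency ceiling
 *	else
 *		i915_gpu_raise();	// headroom available, lift it
 *
 * These symbols are looked up by intel_ips via symbol_get(), mirroring
 * ips_ping_for_i915_load() further down.
 */
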
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;

	i915 = mchdev_get();
	if (!i915)
		return false;

	spin_lock_irq(&mchdev_lock);
	if (i915->ips.max_delay > i915->ips.fmax)
		i915->ips.max_delay--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;

	i915 = mchdev_get();
	if (!i915)
		return false;

	spin_lock_irq(&mchdev_lock);
	if (i915->ips.max_delay < i915->ips.min_delay)
		i915->ips.max_delay++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	spin_lock_irq(&mchdev_lock);
	i915->ips.max_delay = i915->ips.fstart;
	ret = ironlake_set_drps(i915, i915->ips.fstart);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	rcu_assign_pointer(i915_mch_dev, dev_priv);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	rcu_assign_pointer(i915_mch_dev, NULL);

	/* Flush any outstanding mchdev_get() RCU readers before teardown. */
	synchronize_rcu();
}

static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127 * 127 * 900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i * 4] << 24) | (pxw[(i * 4) + 1] << 16) |
			(pxw[(i * 4) + 2] << 8) | (pxw[(i * 4) + 3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

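/*
 * Worked example of the P-state weight loop above (illustrative numbers):
 * vid = 45 and freq = 400000 give
 * val = 45 * 45 * (400000 / 1000) * 255 / (127 * 127 * 900) = 14 in integer
 * math, comfortably under the 0xff limit that triggers the "bad pxval"
 * complaint.
 */
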
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!sanitize_rc6(dev_priv)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		pm_runtime_get(&dev_priv->drm.pdev->dev);
	}

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN(dev_priv, 6) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (rps->max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;
	rps->cur_freq = rps->idle_freq;
}

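/*
 * Worked example of the overclock probe above: a pcode reply of
 * params = 0x8000001a has BIT(31) set (OC supported) and a low byte of
 * 0x1a = 26, i.e. an overclock ceiling of 26 * 50 = 1300 MHz, which then
 * replaces rps->max_freq.
 */
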
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!HAS_RC6(dev_priv))
		pm_runtime_put(&dev_priv->drm.pdev->dev);
}

void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
	intel_disable_gt_powersave(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_reset_rps_interrupts(dev_priv);
}

static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->gt_pm.rps.lock);

	if (!i915->gt_pm.llc_pstate.enabled)
		return;

	/* Currently there is no HW configuration to be done to disable. */

	i915->gt_pm.llc_pstate.enabled = false;
}

static void intel_disable_rc6(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);

	if (!dev_priv->gt_pm.rc6.enabled)
		return;

	if (INTEL_GEN(dev_priv) >= 9)
		gen9_disable_rc6(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_disable_rc6(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_disable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_disable_rc6(dev_priv);

	dev_priv->gt_pm.rc6.enabled = false;
}

static void intel_disable_rps(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);

	if (!dev_priv->gt_pm.rps.enabled)
		return;

	if (INTEL_GEN(dev_priv) >= 9)
		gen9_disable_rps(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_disable_rps(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_disable_rps(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_disable_rps(dev_priv);
	else if (IS_IRONLAKE_M(dev_priv))
		ironlake_disable_drps(dev_priv);

	dev_priv->gt_pm.rps.enabled = false;
}

void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->gt_pm.rps.lock);

	intel_disable_rc6(dev_priv);
	intel_disable_rps(dev_priv);
	if (HAS_LLC(dev_priv))
		intel_disable_llc_pstate(dev_priv);

	mutex_unlock(&dev_priv->gt_pm.rps.lock);
}

static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->gt_pm.rps.lock);

	if (i915->gt_pm.llc_pstate.enabled)
		return;

	gen6_update_ring_freq(i915);

	i915->gt_pm.llc_pstate.enabled = true;
}

static void intel_enable_rc6(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);

	if (dev_priv->gt_pm.rc6.enabled)
		return;

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_enable_rc6(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 9)
		gen9_enable_rc6(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		gen8_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_enable_rc6(dev_priv);

	dev_priv->gt_pm.rc6.enabled = true;
}

static void intel_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	lockdep_assert_held(&rps->lock);

	if (rps->enabled)
		return;

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rps(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(rps->max_freq < rps->min_freq);
	WARN_ON(rps->idle_freq > rps->max_freq);

	WARN_ON(rps->efficient_freq < rps->min_freq);
	WARN_ON(rps->efficient_freq > rps->max_freq);

	rps->enabled = true;
}

void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->gt_pm.rps.lock);

	if (HAS_RC6(dev_priv))
		intel_enable_rc6(dev_priv);
	if (HAS_RPS(dev_priv))
		intel_enable_rps(dev_priv);
	if (HAS_LLC(dev_priv))
		intel_enable_llc_pstate(dev_priv);

	mutex_unlock(&dev_priv->gt_pm.rps.lock);
}

static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}

static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	u32 val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = I915_READ(GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	I915_WRITE(GEN8_L3SQCREG1, val);

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}

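/*
 * Usage note: the two callers below split the general- vs high-priority
 * L3 SQC credits differently -- bdw_init_clock_gating() passes (30, 2) and
 * chv_init_clock_gating() passes (38, 2), per the respective
 * WaProgramL3SqcReg1Default notes.
 */
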
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* This is not a Wa. Enable to reduce Sampler power */
	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);

	/* WaEnable32PlaneMode:icl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}

static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
		   CNP_PWM_CGE_GATING_DISABLE);
}

static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	cnp_init_clock_gating(dev_priv);

	/* This is not a Wa. Enable for better image quality */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));

	/* WaEnableChickenDCPR:cnl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcWakeMemOn:cnl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);

	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
	/* ReadHitWriteOnlyDisable:cnl */
	val |= RCCUNIT_CLKGATE_DIS;
	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
		val |= SARBUNIT_CLKGATE_DIS;
	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);

	/* Wa_2201832410:cnl */
	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
	val |= GWUNIT_CLKGATE_DIS;
	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);

	/* WaDisableVFclkgate:cnl */
	/* WaVFUnitClockGatingDisable:cnl */
	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
	val |= VFUNIT_CLKGATE_DIS;
	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}

static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WaFbcNukeOnHostModify:cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* The GTT cache must be disabled if the system is using 2M pages. */
	bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
						 I915_GTT_PAGE_SIZE_2M);
	enum pipe pipe;

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaGttCachingOffByDefault:bdw */
	I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}

static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}

static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 11))
		dev_priv->display.init_clock_gating = icl_init_clock_gating;
	else if (IS_CANNONLAKE(dev_priv))
		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
	else if (IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skl_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = chv_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
	else if (IS_GEN(dev_priv, 6))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN(dev_priv, 5))
		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
	else if (IS_GEN(dev_priv, 3))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN(dev_priv, 2))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN(dev_priv, 5))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_GEN(dev_priv, 4)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN(dev_priv, 3)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN(dev_priv, 2)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

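/*
 * Worked example of the gen9+ conversion pair above, assuming the current
 * definitions GT_FREQUENCY_MULTIPLIER == 50 and GEN9_FREQ_SCALER == 3: an
 * opcode of 36 maps to 36 * 50 / 3 = 600 MHz, and intel_freq_opcode(600)
 * takes it back, 600 * 3 / 50 = 36.
 */
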
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->gt_pm.rps.lock);
	mutex_init(&dev_priv->gt_pm.rps.power.mutex);

	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);

	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}

static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/*
	 * The registers accessed do not need forcewake. We borrow the
	 * uncore lock to prevent concurrent access to the range register.
	 */
	lockdep_assert_held(&dev_priv->uncore.lock);

	/*
	 * vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);

	/*
	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * here.
	 */

	return lower | (u64)upper << 8;
}

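/*
 * Layout sketch for the return expression above: the lower window holds
 * bits 31:0 of the 40-bit counter and the upper window holds bits 39:8,
 * so "lower | (u64)upper << 8" reassembles the full value. The two windows
 * overlap in bits 31:8, which is why the loop re-reads until the upper
 * half is stable across the lower read.
 */
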
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u64 time_hw, prev_hw, overflow_hw;
	unsigned int fw_domains;
	unsigned long flags;
	unsigned int i;
	u32 mul, div;

	if (!HAS_RC6(dev_priv))
		return 0;

	/*
	 * Store previous hw counter values for counter wrap-around handling.
	 *
	 * There are only four interesting registers and they live next to each
	 * other so we can use the relative address, compared to the smallest
	 * one as the index into driver storage.
	 */
	i = (i915_mmio_reg_offset(reg) -
	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
		return 0;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		mul = 1000000;
		div = dev_priv->czclk_freq;
		overflow_hw = BIT_ULL(40);
		time_hw = vlv_residency_raw(dev_priv, reg);
	} else {
		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
		if (IS_GEN9_LP(dev_priv)) {
			mul = 10000;
			div = 12;
		} else {
			mul = 1280;
			div = 1;
		}

		overflow_hw = BIT_ULL(32);
		time_hw = intel_uncore_read_fw(uncore, reg);
	}

	/*
	 * Counter wrap handling. This relies on a sufficient frequency of
	 * queries, otherwise the counters can still wrap undetected.
	 */
	prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
	dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;

	/* RC6 delta from last sample. */
	if (time_hw >= prev_hw)
		time_hw -= prev_hw;
	else
		time_hw += overflow_hw - prev_hw;

	/* Add delta to RC6 extended raw driver copy. */
	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return mul_u64_u32_div(time_hw, mul, div);
}

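/*
 * Unit check for the final conversion above: on Gen9LP mul/div = 10000/12,
 * i.e. one counter tick is 10000/12 = 833.33ns; everywhere else mul/div =
 * 1280/1 for the usual 1.28us tick. On VLV/CHV the tick is one CZ clock
 * period, hence mul = 1000000 over the czclk frequency in kHz.
 */
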
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}

u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
{
	u32 cagf;

	if (INTEL_GEN(dev_priv) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

	return cagf;
}