2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_fb.h"
58 #include "display/intel_gmbus.h"
59 #include "display/intel_hdmi.h"
60 #include "display/intel_lvds.h"
61 #include "display/intel_sdvo.h"
62 #include "display/intel_tv.h"
63 #include "display/intel_vdsc.h"
64 #include "display/intel_vrr.h"
66 #include "gem/i915_gem_lmem.h"
67 #include "gem/i915_gem_object.h"
69 #include "gt/intel_rps.h"
74 #include "intel_acpi.h"
75 #include "intel_atomic.h"
76 #include "intel_atomic_plane.h"
78 #include "intel_cdclk.h"
79 #include "intel_color.h"
80 #include "intel_crtc.h"
81 #include "intel_csr.h"
82 #include "intel_display_types.h"
83 #include "intel_dp_link_training.h"
84 #include "intel_fbc.h"
85 #include "intel_fdi.h"
86 #include "intel_fbdev.h"
87 #include "intel_fifo_underrun.h"
88 #include "intel_frontbuffer.h"
89 #include "intel_hdcp.h"
90 #include "intel_hotplug.h"
91 #include "intel_overlay.h"
92 #include "intel_pipe_crc.h"
94 #include "intel_pps.h"
95 #include "intel_psr.h"
96 #include "intel_quirks.h"
97 #include "intel_sideband.h"
98 #include "intel_sprite.h"
100 #include "intel_vga.h"
101 #include "i9xx_plane.h"
102 #include "skl_scaler.h"
103 #include "skl_universal_plane.h"
105 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
106 struct intel_crtc_state *pipe_config);
107 static void ilk_pch_clock_get(struct intel_crtc *crtc,
108 struct intel_crtc_state *pipe_config);
110 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
111 struct drm_i915_gem_object *obj,
112 struct drm_mode_fb_cmd2 *mode_cmd);
113 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
114 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
115 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
116 const struct intel_link_m_n *m_n,
117 const struct intel_link_m_n *m2_n2);
118 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
119 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
120 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
121 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
122 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
123 static void intel_modeset_setup_hw_state(struct drm_device *dev,
124 struct drm_modeset_acquire_ctx *ctx);
126 /* returns HPLL frequency in kHz */
127 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
129 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
131 /* Obtain SKU information */
132 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
133 CCK_FUSE_HPLL_FREQ_MASK;
135 return vco_freq[hpll_freq] * 1000;
138 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
139 const char *name, u32 reg, int ref_freq)
144 val = vlv_cck_read(dev_priv, reg);
145 divider = val & CCK_FREQUENCY_VALUES;
147 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
148 (divider << CCK_FREQUENCY_STATUS_SHIFT),
149 "%s change in progress\n", name);
151 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
154 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
155 const char *name, u32 reg)
159 vlv_cck_get(dev_priv);
161 if (dev_priv->hpll_freq == 0)
162 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
164 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
166 vlv_cck_put(dev_priv);
171 static void intel_update_czclk(struct drm_i915_private *dev_priv)
173 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
176 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
177 CCK_CZ_CLOCK_CONTROL);
179 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
180 dev_priv->czclk_freq);
183 /* WA Display #0827: Gen9:all */
185 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
188 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
189 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
191 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
192 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
195 /* Wa_2006604312:icl,ehl */
197 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
201 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
202 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
204 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
205 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
209 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
211 return crtc_state->master_transcoder != INVALID_TRANSCODER;
215 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
217 return crtc_state->sync_mode_slaves_mask != 0;
221 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
223 return is_trans_port_sync_master(crtc_state) ||
224 is_trans_port_sync_slave(crtc_state);
227 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
230 i915_reg_t reg = PIPEDSL(pipe);
234 if (DISPLAY_VER(dev_priv) == 2)
235 line_mask = DSL_LINEMASK_GEN2;
237 line_mask = DSL_LINEMASK_GEN3;
239 line1 = intel_de_read(dev_priv, reg) & line_mask;
241 line2 = intel_de_read(dev_priv, reg) & line_mask;
243 return line1 != line2;
246 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
249 enum pipe pipe = crtc->pipe;
251 /* Wait for the display line to settle/start moving */
252 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
253 drm_err(&dev_priv->drm,
254 "pipe %c scanline %s wait timed out\n",
255 pipe_name(pipe), onoff(state));
258 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
260 wait_for_pipe_scanline_moving(crtc, false);
263 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
265 wait_for_pipe_scanline_moving(crtc, true);
269 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
271 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
272 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
274 if (DISPLAY_VER(dev_priv) >= 4) {
275 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
276 i915_reg_t reg = PIPECONF(cpu_transcoder);
278 /* Wait for the Pipe State to go off */
279 if (intel_de_wait_for_clear(dev_priv, reg,
280 I965_PIPECONF_ACTIVE, 100))
281 drm_WARN(&dev_priv->drm, 1,
282 "pipe_off wait timed out\n");
284 intel_wait_for_pipe_scanline_stopped(crtc);
288 /* Only for pre-ILK configs */
289 void assert_pll(struct drm_i915_private *dev_priv,
290 enum pipe pipe, bool state)
295 val = intel_de_read(dev_priv, DPLL(pipe));
296 cur_state = !!(val & DPLL_VCO_ENABLE);
297 I915_STATE_WARN(cur_state != state,
298 "PLL state assertion failure (expected %s, current %s)\n",
299 onoff(state), onoff(cur_state));
302 /* XXX: the dsi pll is shared between MIPI DSI ports */
303 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
308 vlv_cck_get(dev_priv);
309 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
310 vlv_cck_put(dev_priv);
312 cur_state = val & DSI_PLL_VCO_EN;
313 I915_STATE_WARN(cur_state != state,
314 "DSI PLL state assertion failure (expected %s, current %s)\n",
315 onoff(state), onoff(cur_state));
318 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
319 enum pipe pipe, bool state)
323 if (HAS_DDI(dev_priv)) {
325 * DDI does not have a specific FDI_TX register.
327 * FDI is never fed from EDP transcoder
328 * so pipe->transcoder cast is fine here.
330 enum transcoder cpu_transcoder = (enum transcoder)pipe;
331 u32 val = intel_de_read(dev_priv,
332 TRANS_DDI_FUNC_CTL(cpu_transcoder));
333 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
335 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
336 cur_state = !!(val & FDI_TX_ENABLE);
338 I915_STATE_WARN(cur_state != state,
339 "FDI TX state assertion failure (expected %s, current %s)\n",
340 onoff(state), onoff(cur_state));
342 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
343 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
345 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
346 enum pipe pipe, bool state)
351 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
352 cur_state = !!(val & FDI_RX_ENABLE);
353 I915_STATE_WARN(cur_state != state,
354 "FDI RX state assertion failure (expected %s, current %s)\n",
355 onoff(state), onoff(cur_state));
357 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
358 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
360 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
365 /* ILK FDI PLL is always enabled */
366 if (IS_IRONLAKE(dev_priv))
369 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
370 if (HAS_DDI(dev_priv))
373 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
374 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
377 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
378 enum pipe pipe, bool state)
383 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
384 cur_state = !!(val & FDI_RX_PLL_ENABLE);
385 I915_STATE_WARN(cur_state != state,
386 "FDI RX PLL assertion failure (expected %s, current %s)\n",
387 onoff(state), onoff(cur_state));
390 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
394 enum pipe panel_pipe = INVALID_PIPE;
397 if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
400 if (HAS_PCH_SPLIT(dev_priv)) {
403 pp_reg = PP_CONTROL(0);
404 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
407 case PANEL_PORT_SELECT_LVDS:
408 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
410 case PANEL_PORT_SELECT_DPA:
411 g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
413 case PANEL_PORT_SELECT_DPC:
414 g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
416 case PANEL_PORT_SELECT_DPD:
417 g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
420 MISSING_CASE(port_sel);
423 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
424 /* presumably write lock depends on pipe, not port select */
425 pp_reg = PP_CONTROL(pipe);
430 pp_reg = PP_CONTROL(0);
431 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
433 drm_WARN_ON(&dev_priv->drm,
434 port_sel != PANEL_PORT_SELECT_LVDS);
435 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
438 val = intel_de_read(dev_priv, pp_reg);
439 if (!(val & PANEL_POWER_ON) ||
440 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
443 I915_STATE_WARN(panel_pipe == pipe && locked,
444 "panel assertion failure, pipe %c regs locked\n",
448 void assert_pipe(struct drm_i915_private *dev_priv,
449 enum transcoder cpu_transcoder, bool state)
452 enum intel_display_power_domain power_domain;
453 intel_wakeref_t wakeref;
455 /* we keep both pipes enabled on 830 */
456 if (IS_I830(dev_priv))
459 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
460 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
462 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
463 cur_state = !!(val & PIPECONF_ENABLE);
465 intel_display_power_put(dev_priv, power_domain, wakeref);
470 I915_STATE_WARN(cur_state != state,
471 "transcoder %s assertion failure (expected %s, current %s)\n",
472 transcoder_name(cpu_transcoder),
473 onoff(state), onoff(cur_state));
476 static void assert_plane(struct intel_plane *plane, bool state)
481 cur_state = plane->get_hw_state(plane, &pipe);
483 I915_STATE_WARN(cur_state != state,
484 "%s assertion failure (expected %s, current %s)\n",
485 plane->base.name, onoff(state), onoff(cur_state));
488 #define assert_plane_enabled(p) assert_plane(p, true)
489 #define assert_plane_disabled(p) assert_plane(p, false)
491 static void assert_planes_disabled(struct intel_crtc *crtc)
493 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
494 struct intel_plane *plane;
496 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
497 assert_plane_disabled(plane);
500 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
506 val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
507 enabled = !!(val & TRANS_ENABLE);
508 I915_STATE_WARN(enabled,
509 "transcoder assertion failed, should be off on pipe %c but is still active\n",
513 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
514 enum pipe pipe, enum port port,
520 state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
522 I915_STATE_WARN(state && port_pipe == pipe,
523 "PCH DP %c enabled on transcoder %c, should be disabled\n",
524 port_name(port), pipe_name(pipe));
526 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
527 "IBX PCH DP %c still using transcoder B\n",
531 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
532 enum pipe pipe, enum port port,
538 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
540 I915_STATE_WARN(state && port_pipe == pipe,
541 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
542 port_name(port), pipe_name(pipe));
544 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
545 "IBX PCH HDMI %c still using transcoder B\n",
549 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
554 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
555 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
556 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
558 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
560 "PCH VGA enabled on transcoder %c, should be disabled\n",
563 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
565 "PCH LVDS enabled on transcoder %c, should be disabled\n",
568 /* PCH SDVOB multiplex with HDMIB */
569 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
570 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
571 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
574 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
575 struct intel_digital_port *dig_port,
576 unsigned int expected_mask)
581 switch (dig_port->base.port) {
583 port_mask = DPLL_PORTB_READY_MASK;
587 port_mask = DPLL_PORTC_READY_MASK;
592 port_mask = DPLL_PORTD_READY_MASK;
593 dpll_reg = DPIO_PHY_STATUS;
599 if (intel_de_wait_for_register(dev_priv, dpll_reg,
600 port_mask, expected_mask, 1000))
601 drm_WARN(&dev_priv->drm, 1,
602 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
603 dig_port->base.base.base.id, dig_port->base.base.name,
604 intel_de_read(dev_priv, dpll_reg) & port_mask,
608 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
610 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
611 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
612 enum pipe pipe = crtc->pipe;
614 u32 val, pipeconf_val;
616 /* Make sure PCH DPLL is enabled */
617 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
619 /* FDI must be feeding us bits for PCH ports */
620 assert_fdi_tx_enabled(dev_priv, pipe);
621 assert_fdi_rx_enabled(dev_priv, pipe);
623 if (HAS_PCH_CPT(dev_priv)) {
624 reg = TRANS_CHICKEN2(pipe);
625 val = intel_de_read(dev_priv, reg);
627 * Workaround: Set the timing override bit
628 * before enabling the pch transcoder.
630 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
631 /* Configure frame start delay to match the CPU */
632 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
633 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
634 intel_de_write(dev_priv, reg, val);
637 reg = PCH_TRANSCONF(pipe);
638 val = intel_de_read(dev_priv, reg);
639 pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
641 if (HAS_PCH_IBX(dev_priv)) {
642 /* Configure frame start delay to match the CPU */
643 val &= ~TRANS_FRAME_START_DELAY_MASK;
644 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
647 * Make the BPC in transcoder be consistent with
648 * that in pipeconf reg. For HDMI we must use 8bpc
649 * here for both 8bpc and 12bpc.
651 val &= ~PIPECONF_BPC_MASK;
652 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
653 val |= PIPECONF_8BPC;
655 val |= pipeconf_val & PIPECONF_BPC_MASK;
658 val &= ~TRANS_INTERLACE_MASK;
659 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
660 if (HAS_PCH_IBX(dev_priv) &&
661 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
662 val |= TRANS_LEGACY_INTERLACED_ILK;
664 val |= TRANS_INTERLACED;
666 val |= TRANS_PROGRESSIVE;
669 intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
670 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
671 drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
675 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
676 enum transcoder cpu_transcoder)
678 u32 val, pipeconf_val;
680 /* FDI must be feeding us bits for PCH ports */
681 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
682 assert_fdi_rx_enabled(dev_priv, PIPE_A);
684 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
685 /* Workaround: set timing override bit. */
686 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
687 /* Configure frame start delay to match the CPU */
688 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
689 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
690 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
693 pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
695 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
696 PIPECONF_INTERLACED_ILK)
697 val |= TRANS_INTERLACED;
699 val |= TRANS_PROGRESSIVE;
701 intel_de_write(dev_priv, LPT_TRANSCONF, val);
702 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
703 TRANS_STATE_ENABLE, 100))
704 drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
707 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
713 /* FDI relies on the transcoder */
714 assert_fdi_tx_disabled(dev_priv, pipe);
715 assert_fdi_rx_disabled(dev_priv, pipe);
717 /* Ports must be off as well */
718 assert_pch_ports_disabled(dev_priv, pipe);
720 reg = PCH_TRANSCONF(pipe);
721 val = intel_de_read(dev_priv, reg);
722 val &= ~TRANS_ENABLE;
723 intel_de_write(dev_priv, reg, val);
724 /* wait for PCH transcoder off, transcoder state */
725 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
726 drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
729 if (HAS_PCH_CPT(dev_priv)) {
730 /* Workaround: Clear the timing override chicken bit again. */
731 reg = TRANS_CHICKEN2(pipe);
732 val = intel_de_read(dev_priv, reg);
733 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
734 intel_de_write(dev_priv, reg, val);
738 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
742 val = intel_de_read(dev_priv, LPT_TRANSCONF);
743 val &= ~TRANS_ENABLE;
744 intel_de_write(dev_priv, LPT_TRANSCONF, val);
745 /* wait for PCH transcoder off, transcoder state */
746 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
747 TRANS_STATE_ENABLE, 50))
748 drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
750 /* Workaround: clear timing override bit. */
751 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
752 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
753 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
756 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
758 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
760 if (HAS_PCH_LPT(dev_priv))
766 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
768 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
769 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
770 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
771 enum pipe pipe = crtc->pipe;
775 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
777 assert_planes_disabled(crtc);
780 * A pipe without a PLL won't actually be able to drive bits from
781 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
784 if (HAS_GMCH(dev_priv)) {
785 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
786 assert_dsi_pll_enabled(dev_priv);
788 assert_pll_enabled(dev_priv, pipe);
790 if (new_crtc_state->has_pch_encoder) {
791 /* if driving the PCH, we need FDI enabled */
792 assert_fdi_rx_pll_enabled(dev_priv,
793 intel_crtc_pch_transcoder(crtc));
794 assert_fdi_tx_pll_enabled(dev_priv,
795 (enum pipe) cpu_transcoder);
797 /* FIXME: assert CPU port conditions for SNB+ */
800 reg = PIPECONF(cpu_transcoder);
801 val = intel_de_read(dev_priv, reg);
802 if (val & PIPECONF_ENABLE) {
803 /* we keep both pipes enabled on 830 */
804 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
808 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
809 intel_de_posting_read(dev_priv, reg);
812 * Until the pipe starts PIPEDSL reads will return a stale value,
813 * which causes an apparent vblank timestamp jump when PIPEDSL
814 * resets to its proper value. That also messes up the frame count
815 * when it's derived from the timestamps. So let's wait for the
816 * pipe to start properly before we call drm_crtc_vblank_on()
818 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
819 intel_wait_for_pipe_scanline_moving(crtc);
822 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
824 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
825 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
826 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
827 enum pipe pipe = crtc->pipe;
831 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
834 * Make sure planes won't keep trying to pump pixels to us,
835 * or we might hang the display.
837 assert_planes_disabled(crtc);
839 reg = PIPECONF(cpu_transcoder);
840 val = intel_de_read(dev_priv, reg);
841 if ((val & PIPECONF_ENABLE) == 0)
845 * Double wide has implications for planes
846 * so best keep it disabled when not needed.
848 if (old_crtc_state->double_wide)
849 val &= ~PIPECONF_DOUBLE_WIDE;
851 /* Don't disable pipe or pipe PLLs if needed */
852 if (!IS_I830(dev_priv))
853 val &= ~PIPECONF_ENABLE;
855 intel_de_write(dev_priv, reg, val);
856 if ((val & PIPECONF_ENABLE) == 0)
857 intel_wait_for_pipe_off(old_crtc_state);
861 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
864 return info->is_yuv &&
865 info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
869 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
871 struct drm_i915_private *dev_priv = to_i915(fb->dev);
872 unsigned int cpp = fb->format->cpp[color_plane];
874 switch (fb->modifier) {
875 case DRM_FORMAT_MOD_LINEAR:
876 return intel_tile_size(dev_priv);
877 case I915_FORMAT_MOD_X_TILED:
878 if (DISPLAY_VER(dev_priv) == 2)
882 case I915_FORMAT_MOD_Y_TILED_CCS:
883 if (is_ccs_plane(fb, color_plane))
886 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
887 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
888 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
889 if (is_ccs_plane(fb, color_plane))
892 case I915_FORMAT_MOD_Y_TILED:
893 if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
897 case I915_FORMAT_MOD_Yf_TILED_CCS:
898 if (is_ccs_plane(fb, color_plane))
901 case I915_FORMAT_MOD_Yf_TILED:
917 MISSING_CASE(fb->modifier);
923 intel_fb_align_height(const struct drm_framebuffer *fb,
924 int color_plane, unsigned int height)
926 unsigned int tile_height = intel_tile_height(fb, color_plane);
928 return ALIGN(height, tile_height);
931 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
933 unsigned int size = 0;
936 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
937 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
942 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
944 unsigned int size = 0;
947 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
948 size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
953 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
955 if (DISPLAY_VER(dev_priv) >= 9)
957 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
958 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
960 else if (DISPLAY_VER(dev_priv) >= 4)
966 static bool has_async_flips(struct drm_i915_private *i915)
968 return DISPLAY_VER(i915) >= 5;
971 unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
974 struct drm_i915_private *dev_priv = to_i915(fb->dev);
976 /* AUX_DIST needs only 4K alignment */
977 if ((DISPLAY_VER(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
978 is_ccs_plane(fb, color_plane))
981 switch (fb->modifier) {
982 case DRM_FORMAT_MOD_LINEAR:
983 return intel_linear_alignment(dev_priv);
984 case I915_FORMAT_MOD_X_TILED:
985 if (has_async_flips(dev_priv))
988 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
989 if (is_semiplanar_uv_plane(fb, color_plane))
990 return intel_tile_row_size(fb, color_plane);
992 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
993 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
995 case I915_FORMAT_MOD_Y_TILED_CCS:
996 case I915_FORMAT_MOD_Yf_TILED_CCS:
997 case I915_FORMAT_MOD_Y_TILED:
998 if (DISPLAY_VER(dev_priv) >= 12 &&
999 is_semiplanar_uv_plane(fb, color_plane))
1000 return intel_tile_row_size(fb, color_plane);
1002 case I915_FORMAT_MOD_Yf_TILED:
1003 return 1 * 1024 * 1024;
1005 MISSING_CASE(fb->modifier);
1010 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1012 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1013 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1015 return DISPLAY_VER(dev_priv) < 4 ||
1017 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
1021 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1023 const struct i915_ggtt_view *view,
1025 unsigned long *out_flags)
1027 struct drm_device *dev = fb->dev;
1028 struct drm_i915_private *dev_priv = to_i915(dev);
1029 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1030 intel_wakeref_t wakeref;
1031 struct i915_gem_ww_ctx ww;
1032 struct i915_vma *vma;
1033 unsigned int pinctl;
1037 if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1038 return ERR_PTR(-EINVAL);
1041 alignment = intel_cursor_alignment(dev_priv);
1043 alignment = intel_surf_alignment(fb, 0);
1044 if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1045 return ERR_PTR(-EINVAL);
1047 /* Note that the w/a also requires 64 PTE of padding following the
1048 * bo. We currently fill all unused PTE with the shadow page and so
1049 * we should always have valid PTE following the scanout preventing
1052 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1053 alignment = 256 * 1024;
1056 * Global gtt pte registers are special registers which actually forward
1057 * writes to a chunk of system memory. Which means that there is no risk
1058 * that the register values disappear as soon as we call
1059 * intel_runtime_pm_put(), so it is correct to wrap only the
1060 * pin/unpin/fence and not more.
1062 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1064 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1067 * Valleyview is definitely limited to scanning out the first
1068 * 512MiB. Lets presume this behaviour was inherited from the
1069 * g4x display engine and that all earlier gen are similarly
1070 * limited. Testing suggests that it is a little more
1071 * complicated than this. For example, Cherryview appears quite
1072 * happy to scanout from anywhere within its global aperture.
1075 if (HAS_GMCH(dev_priv))
1076 pinctl |= PIN_MAPPABLE;
1078 i915_gem_ww_ctx_init(&ww, true);
1080 ret = i915_gem_object_lock(obj, &ww);
1081 if (!ret && phys_cursor)
1082 ret = i915_gem_object_attach_phys(obj, alignment);
1084 ret = i915_gem_object_pin_pages(obj);
1089 vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
1097 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1099 * Install a fence for tiled scan-out. Pre-i965 always needs a
1100 * fence, whereas 965+ only requires a fence if using
1101 * framebuffer compression. For simplicity, we always, when
1102 * possible, install a fence as the cost is not that onerous.
1104 * If we fail to fence the tiled scanout, then either the
1105 * modeset will reject the change (which is highly unlikely as
1106 * the affected systems, all but one, do not have unmappable
1107 * space) or we will not be able to enable full powersaving
1108 * techniques (also likely not to apply due to various limits
1109 * FBC and the like impose on the size of the buffer, which
1110 * presumably we violated anyway with this unmappable buffer).
1111 * Anyway, it is presumably better to stumble onwards with
1112 * something and try to run the system in a "less than optimal"
1113 * mode that matches the user configuration.
1115 ret = i915_vma_pin_fence(vma);
1116 if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
1117 i915_vma_unpin(vma);
1123 *out_flags |= PLANE_HAS_FENCE;
1129 i915_gem_object_unpin_pages(obj);
1131 if (ret == -EDEADLK) {
1132 ret = i915_gem_ww_ctx_backoff(&ww);
1136 i915_gem_ww_ctx_fini(&ww);
1140 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1141 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1145 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1147 if (flags & PLANE_HAS_FENCE)
1148 i915_vma_unpin_fence(vma);
1149 i915_vma_unpin(vma);
1154 * Convert the x/y offsets into a linear offset.
1155 * Only valid with 0/180 degree rotation, which is fine since linear
1156 * offset is only used with linear buffers on pre-hsw and tiled buffers
1157 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1159 u32 intel_fb_xy_to_linear(int x, int y,
1160 const struct intel_plane_state *state,
1163 const struct drm_framebuffer *fb = state->hw.fb;
1164 unsigned int cpp = fb->format->cpp[color_plane];
1165 unsigned int pitch = state->view.color_plane[color_plane].stride;
1167 return y * pitch + x * cpp;
1171 * Add the x/y offsets derived from fb->offsets[] to the user
1172 * specified plane src x/y offsets. The resulting x/y offsets
1173 * specify the start of scanout from the beginning of the gtt mapping.
1175 void intel_add_fb_offsets(int *x, int *y,
1176 const struct intel_plane_state *state,
1180 *x += state->view.color_plane[color_plane].x;
1181 *y += state->view.color_plane[color_plane].y;
1184 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1186 switch (fb_modifier) {
1187 case I915_FORMAT_MOD_X_TILED:
1188 return I915_TILING_X;
1189 case I915_FORMAT_MOD_Y_TILED:
1190 case I915_FORMAT_MOD_Y_TILED_CCS:
1191 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1192 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1193 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1194 return I915_TILING_Y;
1196 return I915_TILING_NONE;
1201 * From the Sky Lake PRM:
1202 * "The Color Control Surface (CCS) contains the compression status of
1203 * the cache-line pairs. The compression state of the cache-line pair
1204 * is specified by 2 bits in the CCS. Each CCS cache-line represents
1205 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1206 * cache-line-pairs. CCS is always Y tiled."
1208 * Since cache line pairs refers to horizontally adjacent cache lines,
1209 * each cache line in the CCS corresponds to an area of 32x16 cache
1210 * lines on the main surface. Since each pixel is 4 bytes, this gives
1211 * us a ratio of one byte in the CCS for each 8x16 pixels in the
1214 static const struct drm_format_info skl_ccs_formats[] = {
1215 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1216 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1217 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1218 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1219 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1220 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1221 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1222 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1226 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1227 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1228 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1229 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1232 static const struct drm_format_info gen12_ccs_formats[] = {
1233 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1234 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1235 .hsub = 1, .vsub = 1, },
1236 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1237 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1238 .hsub = 1, .vsub = 1, },
1239 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1240 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1241 .hsub = 1, .vsub = 1, .has_alpha = true },
1242 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1243 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1244 .hsub = 1, .vsub = 1, .has_alpha = true },
1245 { .format = DRM_FORMAT_YUYV, .num_planes = 2,
1246 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1247 .hsub = 2, .vsub = 1, .is_yuv = true },
1248 { .format = DRM_FORMAT_YVYU, .num_planes = 2,
1249 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1250 .hsub = 2, .vsub = 1, .is_yuv = true },
1251 { .format = DRM_FORMAT_UYVY, .num_planes = 2,
1252 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1253 .hsub = 2, .vsub = 1, .is_yuv = true },
1254 { .format = DRM_FORMAT_VYUY, .num_planes = 2,
1255 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1256 .hsub = 2, .vsub = 1, .is_yuv = true },
1257 { .format = DRM_FORMAT_NV12, .num_planes = 4,
1258 .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1259 .hsub = 2, .vsub = 2, .is_yuv = true },
1260 { .format = DRM_FORMAT_P010, .num_planes = 4,
1261 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1262 .hsub = 2, .vsub = 2, .is_yuv = true },
1263 { .format = DRM_FORMAT_P012, .num_planes = 4,
1264 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1265 .hsub = 2, .vsub = 2, .is_yuv = true },
1266 { .format = DRM_FORMAT_P016, .num_planes = 4,
1267 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1268 .hsub = 2, .vsub = 2, .is_yuv = true },
1272 * Same as gen12_ccs_formats[] above, but with additional surface used
1273 * to pass Clear Color information in plane 2 with 64 bits of data.
1275 static const struct drm_format_info gen12_ccs_cc_formats[] = {
1276 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
1277 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1278 .hsub = 1, .vsub = 1, },
1279 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
1280 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1281 .hsub = 1, .vsub = 1, },
1282 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
1283 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1284 .hsub = 1, .vsub = 1, .has_alpha = true },
1285 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
1286 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1287 .hsub = 1, .vsub = 1, .has_alpha = true },
1290 static const struct drm_format_info *
1291 lookup_format_info(const struct drm_format_info formats[],
1292 int num_formats, u32 format)
1296 for (i = 0; i < num_formats; i++) {
1297 if (formats[i].format == format)
1304 static const struct drm_format_info *
1305 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1307 switch (cmd->modifier[0]) {
1308 case I915_FORMAT_MOD_Y_TILED_CCS:
1309 case I915_FORMAT_MOD_Yf_TILED_CCS:
1310 return lookup_format_info(skl_ccs_formats,
1311 ARRAY_SIZE(skl_ccs_formats),
1313 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1314 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1315 return lookup_format_info(gen12_ccs_formats,
1316 ARRAY_SIZE(gen12_ccs_formats),
1318 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1319 return lookup_format_info(gen12_ccs_cc_formats,
1320 ARRAY_SIZE(gen12_ccs_cc_formats),
1327 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1329 return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1333 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1334 u32 pixel_format, u64 modifier)
1336 struct intel_crtc *crtc;
1337 struct intel_plane *plane;
1340 * We assume the primary plane for pipe A has
1341 * the highest stride limits of them all,
1342 * if in case pipe A is disabled, use the first pipe from pipe_mask.
1344 crtc = intel_get_first_crtc(dev_priv);
1348 plane = to_intel_plane(crtc->base.primary);
1350 return plane->max_stride(plane, pixel_format, modifier,
1355 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1356 u32 pixel_format, u64 modifier)
1359 * Arbitrary limit for gen4+ chosen to match the
1360 * render engine max stride.
1362 * The new CCS hash mode makes remapping impossible
1364 if (!is_ccs_modifier(modifier)) {
1365 if (DISPLAY_VER(dev_priv) >= 7)
1367 else if (DISPLAY_VER(dev_priv) >= 4)
1371 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1375 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1377 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1380 if (is_surface_linear(fb, color_plane)) {
1381 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1386 * To make remapping with linear generally feasible
1387 * we need the stride to be page aligned.
1389 if (fb->pitches[color_plane] > max_stride &&
1390 !is_ccs_modifier(fb->modifier))
1391 return intel_tile_size(dev_priv);
1396 tile_width = intel_tile_width_bytes(fb, color_plane);
1397 if (is_ccs_modifier(fb->modifier)) {
1399 * Display WA #0531: skl,bxt,kbl,glk
1401 * Render decompression and plane width > 3840
1402 * combined with horizontal panning requires the
1403 * plane stride to be a multiple of 4. We'll just
1404 * require the entire fb to accommodate that to avoid
1405 * potential runtime errors at plane configuration time.
1407 if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
1408 color_plane == 0 && fb->width > 3840)
1411 * The main surface pitch must be padded to a multiple of four
1414 else if (DISPLAY_VER(dev_priv) >= 12)
1420 static struct i915_vma *
1421 initial_plane_vma(struct drm_i915_private *i915,
1422 struct intel_initial_plane_config *plane_config)
1424 struct drm_i915_gem_object *obj;
1425 struct i915_vma *vma;
1428 if (plane_config->size == 0)
1431 base = round_down(plane_config->base,
1432 I915_GTT_MIN_ALIGNMENT);
1433 size = round_up(plane_config->base + plane_config->size,
1434 I915_GTT_MIN_ALIGNMENT);
1438 * If the FB is too big, just don't use it since fbdev is not very
1439 * important and we should probably use that space with FBC or other
1442 if (size * 2 > i915->stolen_usable_size)
1445 obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
1450 * Mark it WT ahead of time to avoid changing the
1451 * cache_level during fbdev initialization. The
1452 * unbind there would get stuck waiting for rcu.
1454 i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
1455 I915_CACHE_WT : I915_CACHE_NONE);
1457 switch (plane_config->tiling) {
1458 case I915_TILING_NONE:
1462 obj->tiling_and_stride =
1463 plane_config->fb->base.pitches[0] |
1464 plane_config->tiling;
1467 MISSING_CASE(plane_config->tiling);
1471 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1475 if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
1478 if (i915_gem_object_is_tiled(obj) &&
1479 !i915_vma_is_map_and_fenceable(vma))
1485 i915_gem_object_put(obj);
1490 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
1491 struct intel_initial_plane_config *plane_config)
1493 struct drm_device *dev = crtc->base.dev;
1494 struct drm_i915_private *dev_priv = to_i915(dev);
1495 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1496 struct drm_framebuffer *fb = &plane_config->fb->base;
1497 struct i915_vma *vma;
1499 switch (fb->modifier) {
1500 case DRM_FORMAT_MOD_LINEAR:
1501 case I915_FORMAT_MOD_X_TILED:
1502 case I915_FORMAT_MOD_Y_TILED:
1505 drm_dbg(&dev_priv->drm,
1506 "Unsupported modifier for initial FB: 0x%llx\n",
1511 vma = initial_plane_vma(dev_priv, plane_config);
1515 mode_cmd.pixel_format = fb->format->format;
1516 mode_cmd.width = fb->width;
1517 mode_cmd.height = fb->height;
1518 mode_cmd.pitches[0] = fb->pitches[0];
1519 mode_cmd.modifier[0] = fb->modifier;
1520 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
1522 if (intel_framebuffer_init(to_intel_framebuffer(fb),
1523 vma->obj, &mode_cmd)) {
1524 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
1528 plane_config->vma = vma;
1537 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1538 struct intel_plane_state *plane_state,
1541 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1543 plane_state->uapi.visible = visible;
1546 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1548 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1551 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1553 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1554 struct drm_plane *plane;
1557 * Active_planes aliases if multiple "primary" or cursor planes
1558 * have been used on the same (or wrong) pipe. plane_mask uses
1559 * unique ids, hence we can use that to reconstruct active_planes.
1561 crtc_state->enabled_planes = 0;
1562 crtc_state->active_planes = 0;
1564 drm_for_each_plane_mask(plane, &dev_priv->drm,
1565 crtc_state->uapi.plane_mask) {
1566 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1567 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1571 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
1572 struct intel_plane *plane)
1574 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1575 struct intel_crtc_state *crtc_state =
1576 to_intel_crtc_state(crtc->base.state);
1577 struct intel_plane_state *plane_state =
1578 to_intel_plane_state(plane->base.state);
1580 drm_dbg_kms(&dev_priv->drm,
1581 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
1582 plane->base.base.id, plane->base.name,
1583 crtc->base.base.id, crtc->base.name);
1585 intel_set_plane_visible(crtc_state, plane_state, false);
1586 fixup_plane_bitmasks(crtc_state);
1587 crtc_state->data_rate[plane->id] = 0;
1588 crtc_state->min_cdclk[plane->id] = 0;
1590 if (plane->id == PLANE_PRIMARY)
1591 hsw_disable_ips(crtc_state);
1594 * Vblank time updates from the shadow to live plane control register
1595 * are blocked if the memory self-refresh mode is active at that
1596 * moment. So to make sure the plane gets truly disabled, disable
1597 * first the self-refresh mode. The self-refresh enable bit in turn
1598 * will be checked/applied by the HW only at the next frame start
1599 * event which is after the vblank start event, so we need to have a
1600 * wait-for-vblank between disabling the plane and the pipe.
1602 if (HAS_GMCH(dev_priv) &&
1603 intel_set_memory_cxsr(dev_priv, false))
1604 intel_wait_for_vblank(dev_priv, crtc->pipe);
1607 * Gen2 reports pipe underruns whenever all planes are disabled.
1608 * So disable underrun reporting before all the planes get disabled.
1610 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
1611 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
1613 intel_disable_plane(plane, crtc_state);
1614 intel_wait_for_vblank(dev_priv, crtc->pipe);
1618 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
1619 struct intel_initial_plane_config *plane_config)
1621 struct drm_device *dev = intel_crtc->base.dev;
1622 struct drm_i915_private *dev_priv = to_i915(dev);
1624 struct drm_plane *primary = intel_crtc->base.primary;
1625 struct drm_plane_state *plane_state = primary->state;
1626 struct intel_plane *intel_plane = to_intel_plane(primary);
1627 struct intel_plane_state *intel_state =
1628 to_intel_plane_state(plane_state);
1629 struct intel_crtc_state *crtc_state =
1630 to_intel_crtc_state(intel_crtc->base.state);
1631 struct drm_framebuffer *fb;
1632 struct i915_vma *vma;
1636 * Disable planes if get_initial_plane_config() failed.
1637 * Make sure things work if the surface base is not page aligned.
1639 if (!plane_config->fb)
1642 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
1643 fb = &plane_config->fb->base;
1644 vma = plane_config->vma;
1649 * Failed to alloc the obj, check to see if we should share
1650 * an fb with another CRTC instead
1652 for_each_crtc(dev, c) {
1653 struct intel_plane_state *state;
1655 if (c == &intel_crtc->base)
1658 if (!to_intel_crtc_state(c->state)->uapi.active)
1661 state = to_intel_plane_state(c->primary->state);
1665 if (intel_plane_ggtt_offset(state) == plane_config->base) {
1673 * We've failed to reconstruct the BIOS FB. Current display state
1674 * indicates that the primary plane is visible, but has a NULL FB,
1675 * which will lead to problems later if we don't fix it up. The
1676 * simplest solution is to just disable the primary plane now and
1677 * pretend the BIOS never had it enabled.
1679 intel_plane_disable_noatomic(intel_crtc, intel_plane);
1680 if (crtc_state->bigjoiner) {
1681 struct intel_crtc *slave =
1682 crtc_state->bigjoiner_linked_crtc;
1683 intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
1689 plane_state->rotation = plane_config->rotation;
1690 intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
1691 &intel_state->view);
1693 __i915_vma_pin(vma);
1694 intel_state->vma = i915_vma_get(vma);
1695 if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
1697 intel_state->flags |= PLANE_HAS_FENCE;
1699 plane_state->src_x = 0;
1700 plane_state->src_y = 0;
1701 plane_state->src_w = fb->width << 16;
1702 plane_state->src_h = fb->height << 16;
1704 plane_state->crtc_x = 0;
1705 plane_state->crtc_y = 0;
1706 plane_state->crtc_w = fb->width;
1707 plane_state->crtc_h = fb->height;
1709 if (plane_config->tiling)
1710 dev_priv->preserve_bios_swizzle = true;
1712 plane_state->fb = fb;
1713 drm_framebuffer_get(fb);
1715 plane_state->crtc = &intel_crtc->base;
1716 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
1719 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
1721 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
1722 &to_intel_frontbuffer(fb)->bits);
1726 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
1730 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
1731 plane_state->view.color_plane[0].offset, 0);
1737 __intel_display_resume(struct drm_device *dev,
1738 struct drm_atomic_state *state,
1739 struct drm_modeset_acquire_ctx *ctx)
1741 struct drm_crtc_state *crtc_state;
1742 struct drm_crtc *crtc;
1745 intel_modeset_setup_hw_state(dev, ctx);
1746 intel_vga_redisable(to_i915(dev));
1752 * We've duplicated the state, pointers to the old state are invalid.
1754 * Don't attempt to use the old state until we commit the duplicated state.
1756 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1758 * Force recalculation even if we restore
1759 * current state. With fast modeset this may not result
1760 * in a modeset when the state is compatible.
1762 crtc_state->mode_changed = true;
1765 /* ignore any reset values/BIOS leftovers in the WM registers */
1766 if (!HAS_GMCH(to_i915(dev)))
1767 to_intel_atomic_state(state)->skip_intermediate_wm = true;
1769 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
1771 drm_WARN_ON(dev, ret == -EDEADLK);
1775 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1777 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1778 intel_has_gpu_reset(&dev_priv->gt));
1781 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
1783 struct drm_device *dev = &dev_priv->drm;
1784 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1785 struct drm_atomic_state *state;
1788 if (!HAS_DISPLAY(dev_priv))
1791 /* reset doesn't touch the display */
1792 if (!dev_priv->params.force_reset_modeset_test &&
1793 !gpu_reset_clobbers_display(dev_priv))
1796 /* We have a modeset vs reset deadlock, defensively unbreak it. */
1797 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1798 smp_mb__after_atomic();
1799 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
1801 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
1802 drm_dbg_kms(&dev_priv->drm,
1803 "Modeset potentially stuck, unbreaking through wedging\n");
1804 intel_gt_set_wedged(&dev_priv->gt);
1808 * Need mode_config.mutex so that we don't
1809 * trample ongoing ->detect() and whatnot.
1811 mutex_lock(&dev->mode_config.mutex);
1812 drm_modeset_acquire_init(ctx, 0);
1814 ret = drm_modeset_lock_all_ctx(dev, ctx);
1815 if (ret != -EDEADLK)
1818 drm_modeset_backoff(ctx);
1821 * Disabling the crtcs gracefully seems nicer. Also the
1822 * g33 docs say we should at least disable all the planes.
1824 state = drm_atomic_helper_duplicate_state(dev, ctx);
1825 if (IS_ERR(state)) {
1826 ret = PTR_ERR(state);
1827 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
1832 ret = drm_atomic_helper_disable_all(dev, ctx);
1834 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
1836 drm_atomic_state_put(state);
1840 dev_priv->modeset_restore_state = state;
1841 state->acquire_ctx = ctx;
1844 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
1846 struct drm_device *dev = &dev_priv->drm;
1847 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1848 struct drm_atomic_state *state;
1851 if (!HAS_DISPLAY(dev_priv))
1854 /* reset doesn't touch the display */
1855 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
1858 state = fetch_and_zero(&dev_priv->modeset_restore_state);
1862 /* reset doesn't touch the display */
1863 if (!gpu_reset_clobbers_display(dev_priv)) {
1864 /* for testing only restore the display */
1865 ret = __intel_display_resume(dev, state, ctx);
1867 drm_err(&dev_priv->drm,
1868 "Restoring old state failed with %i\n", ret);
1871 * The display has been reset as well,
1872 * so need a full re-initialization.
1874 intel_pps_unlock_regs_wa(dev_priv);
1875 intel_modeset_init_hw(dev_priv);
1876 intel_init_clock_gating(dev_priv);
1877 intel_hpd_init(dev_priv);
1879 ret = __intel_display_resume(dev, state, ctx);
1881 drm_err(&dev_priv->drm,
1882 "Restoring old state failed with %i\n", ret);
1884 intel_hpd_poll_disable(dev_priv);
1887 drm_atomic_state_put(state);
1889 drm_modeset_drop_locks(ctx);
1890 drm_modeset_acquire_fini(ctx);
1891 mutex_unlock(&dev->mode_config.mutex);
1893 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1896 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
1898 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1899 enum pipe pipe = crtc->pipe;
1902 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
1905 * Display WA #1153: icl
1906 * enable hardware to bypass the alpha math
1907 * and rounding for per-pixel values 00 and 0xff
1909 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
1911 * Display WA # 1605353570: icl
1912 * Set the pixel rounding bit to 1 for allowing
1913 * passthrough of Frame buffer pixels unmodified
1916 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
1917 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1920 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1922 struct drm_crtc *crtc;
1925 drm_for_each_crtc(crtc, &dev_priv->drm) {
1926 struct drm_crtc_commit *commit;
1927 spin_lock(&crtc->commit_lock);
1928 commit = list_first_entry_or_null(&crtc->commit_list,
1929 struct drm_crtc_commit, commit_entry);
1930 cleanup_done = commit ?
1931 try_wait_for_completion(&commit->cleanup_done) : true;
1932 spin_unlock(&crtc->commit_lock);
1937 drm_crtc_wait_one_vblank(crtc);
1945 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
1949 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
1951 mutex_lock(&dev_priv->sb_lock);
1953 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
1954 temp |= SBI_SSCCTL_DISABLE;
1955 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
1957 mutex_unlock(&dev_priv->sb_lock);
1960 /* Program iCLKIP clock to the desired frequency */
1961 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
1963 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1964 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1965 int clock = crtc_state->hw.adjusted_mode.crtc_clock;
1966 u32 divsel, phaseinc, auxdiv, phasedir = 0;
1969 lpt_disable_iclkip(dev_priv);
1971 /* The iCLK virtual clock root frequency is in MHz,
1972 * but the adjusted_mode->crtc_clock in in KHz. To get the
1973 * divisors, it is necessary to divide one by another, so we
1974 * convert the virtual clock precision to KHz here for higher
1977 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
1978 u32 iclk_virtual_root_freq = 172800 * 1000;
1979 u32 iclk_pi_range = 64;
1980 u32 desired_divisor;
1982 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
1984 divsel = (desired_divisor / iclk_pi_range) - 2;
1985 phaseinc = desired_divisor % iclk_pi_range;
1988 * Near 20MHz is a corner case which is
1989 * out of range for the 7-bit divisor
1995 /* This should not happen with any sane values */
1996 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
1997 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
1998 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
1999 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2001 drm_dbg_kms(&dev_priv->drm,
2002 "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2003 clock, auxdiv, divsel, phasedir, phaseinc);
2005 mutex_lock(&dev_priv->sb_lock);
2007 /* Program SSCDIVINTPHASE6 */
2008 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2009 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2010 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2011 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2012 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2013 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2014 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2015 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2017 /* Program SSCAUXDIV */
2018 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2019 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2020 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2021 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2023 /* Enable modulator and associated divider */
2024 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2025 temp &= ~SBI_SSCCTL_DISABLE;
2026 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2028 mutex_unlock(&dev_priv->sb_lock);
2030 /* Wait for initialization time */
2033 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2036 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2038 u32 divsel, phaseinc, auxdiv;
2039 u32 iclk_virtual_root_freq = 172800 * 1000;
2040 u32 iclk_pi_range = 64;
2041 u32 desired_divisor;
2044 if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2047 mutex_lock(&dev_priv->sb_lock);
2049 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2050 if (temp & SBI_SSCCTL_DISABLE) {
2051 mutex_unlock(&dev_priv->sb_lock);
2055 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2056 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2057 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2058 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2059 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2061 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2062 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2063 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2065 mutex_unlock(&dev_priv->sb_lock);
2067 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2069 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2070 desired_divisor << auxdiv);
2073 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2074 enum pipe pch_transcoder)
2076 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2077 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2078 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2080 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2081 intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2082 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2083 intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2084 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2085 intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2087 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2088 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2089 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2090 intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2091 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2092 intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2093 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2094 intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2097 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2101 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2102 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2105 drm_WARN_ON(&dev_priv->drm,
2106 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2108 drm_WARN_ON(&dev_priv->drm,
2109 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2112 temp &= ~FDI_BC_BIFURCATION_SELECT;
2114 temp |= FDI_BC_BIFURCATION_SELECT;
2116 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2117 enable ? "en" : "dis");
2118 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2119 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2122 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2124 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2125 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2127 switch (crtc->pipe) {
2131 if (crtc_state->fdi_lanes > 2)
2132 cpt_set_fdi_bc_bifurcation(dev_priv, false);
2134 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2138 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2147 * Finds the encoder associated with the given CRTC. This can only be
2148 * used when we know that the CRTC isn't feeding multiple encoders!
2150 struct intel_encoder *
2151 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2152 const struct intel_crtc_state *crtc_state)
2154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2155 const struct drm_connector_state *connector_state;
2156 const struct drm_connector *connector;
2157 struct intel_encoder *encoder = NULL;
2158 int num_encoders = 0;
2161 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2162 if (connector_state->crtc != &crtc->base)
2165 encoder = to_intel_encoder(connector_state->best_encoder);
2169 drm_WARN(encoder->base.dev, num_encoders != 1,
2170 "%d encoders for pipe %c\n",
2171 num_encoders, pipe_name(crtc->pipe));
2177 * Enable PCH resources required for PCH ports:
2179 * - FDI training & RX/TX
2180 * - update transcoder timings
2181 * - DP transcoding bits
2184 static void ilk_pch_enable(const struct intel_atomic_state *state,
2185 const struct intel_crtc_state *crtc_state)
2187 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2188 struct drm_device *dev = crtc->base.dev;
2189 struct drm_i915_private *dev_priv = to_i915(dev);
2190 enum pipe pipe = crtc->pipe;
2193 assert_pch_transcoder_disabled(dev_priv, pipe);
2195 if (IS_IVYBRIDGE(dev_priv))
2196 ivb_update_fdi_bc_bifurcation(crtc_state);
2198 /* Write the TU size bits before fdi link training, so that error
2199 * detection works. */
2200 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2201 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2203 /* For PCH output, training FDI link */
2204 dev_priv->display.fdi_link_train(crtc, crtc_state);
2206 /* We need to program the right clock selection before writing the pixel
2207 * mutliplier into the DPLL. */
2208 if (HAS_PCH_CPT(dev_priv)) {
2211 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2212 temp |= TRANS_DPLL_ENABLE(pipe);
2213 sel = TRANS_DPLLB_SEL(pipe);
2214 if (crtc_state->shared_dpll ==
2215 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2219 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2222 /* XXX: pch pll's can be enabled any time before we enable the PCH
2223 * transcoder, and we actually should do this to not upset any PCH
2224 * transcoder that already use the clock when we share it.
2226 * Note that enable_shared_dpll tries to do the right thing, but
2227 * get_shared_dpll unconditionally resets the pll - we need that to have
2228 * the right LVDS enable sequence. */
2229 intel_enable_shared_dpll(crtc_state);
2231 /* set transcoder timing, panel must allow it */
2232 assert_panel_unlocked(dev_priv, pipe);
2233 ilk_pch_transcoder_set_timings(crtc_state, pipe);
2235 intel_fdi_normal_train(crtc);
2237 /* For PCH DP, enable TRANS_DP_CTL */
2238 if (HAS_PCH_CPT(dev_priv) &&
2239 intel_crtc_has_dp_encoder(crtc_state)) {
2240 const struct drm_display_mode *adjusted_mode =
2241 &crtc_state->hw.adjusted_mode;
2242 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2243 i915_reg_t reg = TRANS_DP_CTL(pipe);
2246 temp = intel_de_read(dev_priv, reg);
2247 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2248 TRANS_DP_SYNC_MASK |
2250 temp |= TRANS_DP_OUTPUT_ENABLE;
2251 temp |= bpc << 9; /* same format but at 11:9 */
2253 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2254 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2255 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2256 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2258 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2259 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2260 temp |= TRANS_DP_PORT_SEL(port);
2262 intel_de_write(dev_priv, reg, temp);
2265 ilk_enable_pch_transcoder(crtc_state);
2268 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2270 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2271 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2272 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2274 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2276 lpt_program_iclkip(crtc_state);
2278 /* Set transcoder timing. */
2279 ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2281 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2284 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2287 i915_reg_t dslreg = PIPEDSL(pipe);
2290 temp = intel_de_read(dev_priv, dslreg);
2292 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2293 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2294 drm_err(&dev_priv->drm,
2295 "mode set failed: pipe %c stuck\n",
2300 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2302 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2304 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2305 enum pipe pipe = crtc->pipe;
2306 int width = drm_rect_width(dst);
2307 int height = drm_rect_height(dst);
2311 if (!crtc_state->pch_pfit.enabled)
2314 /* Force use of hard-coded filter coefficients
2315 * as some pre-programmed values are broken,
2318 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2319 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2320 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2322 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2324 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2325 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2328 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
2330 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2331 struct drm_device *dev = crtc->base.dev;
2332 struct drm_i915_private *dev_priv = to_i915(dev);
2334 if (!crtc_state->ips_enabled)
2338 * We can only enable IPS after we enable a plane and wait for a vblank
2339 * This function is called from post_plane_update, which is run after
2342 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
2344 if (IS_BROADWELL(dev_priv)) {
2345 drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
2346 IPS_ENABLE | IPS_PCODE_CONTROL));
2347 /* Quoting Art Runyan: "its not safe to expect any particular
2348 * value in IPS_CTL bit 31 after enabling IPS through the
2349 * mailbox." Moreover, the mailbox may return a bogus state,
2350 * so we need to just enable it and continue on.
2353 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
2354 /* The bit only becomes 1 in the next vblank, so this wait here
2355 * is essentially intel_wait_for_vblank. If we don't have this
2356 * and don't wait for vblanks until the end of crtc_enable, then
2357 * the HW state readout code will complain that the expected
2358 * IPS_CTL value is not the one we read. */
2359 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
2360 drm_err(&dev_priv->drm,
2361 "Timed out waiting for IPS enable\n");
2365 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
2367 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2368 struct drm_device *dev = crtc->base.dev;
2369 struct drm_i915_private *dev_priv = to_i915(dev);
2371 if (!crtc_state->ips_enabled)
2374 if (IS_BROADWELL(dev_priv)) {
2376 sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
2378 * Wait for PCODE to finish disabling IPS. The BSpec specified
2379 * 42ms timeout value leads to occasional timeouts so use 100ms
2382 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
2383 drm_err(&dev_priv->drm,
2384 "Timed out waiting for IPS disable\n");
2386 intel_de_write(dev_priv, IPS_CTL, 0);
2387 intel_de_posting_read(dev_priv, IPS_CTL);
2390 /* We need to wait for a vblank before we can disable the plane. */
2391 intel_wait_for_vblank(dev_priv, crtc->pipe);
2394 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
2396 if (intel_crtc->overlay)
2397 (void) intel_overlay_switch_off(intel_crtc->overlay);
2399 /* Let userspace switch the overlay on again. In most cases userspace
2400 * has to recompute where to put it anyway.
2404 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2405 const struct intel_crtc_state *new_crtc_state)
2407 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2408 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2410 if (!old_crtc_state->ips_enabled)
2413 if (intel_crtc_needs_modeset(new_crtc_state))
2417 * Workaround : Do not read or write the pipe palette/gamma data while
2418 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2420 * Disable IPS before we program the LUT.
2422 if (IS_HASWELL(dev_priv) &&
2423 (new_crtc_state->uapi.color_mgmt_changed ||
2424 new_crtc_state->update_pipe) &&
2425 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2428 return !new_crtc_state->ips_enabled;
2431 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2432 const struct intel_crtc_state *new_crtc_state)
2434 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2435 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2437 if (!new_crtc_state->ips_enabled)
2440 if (intel_crtc_needs_modeset(new_crtc_state))
2444 * Workaround : Do not read or write the pipe palette/gamma data while
2445 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2447 * Re-enable IPS after the LUT has been programmed.
2449 if (IS_HASWELL(dev_priv) &&
2450 (new_crtc_state->uapi.color_mgmt_changed ||
2451 new_crtc_state->update_pipe) &&
2452 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2456 * We can't read out IPS on broadwell, assume the worst and
2457 * forcibly enable IPS on the first fastset.
2459 if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2462 return !old_crtc_state->ips_enabled;
2465 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2467 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2469 if (!crtc_state->nv12_planes)
2472 /* WA Display #0827: Gen9:all */
2473 if (DISPLAY_VER(dev_priv) == 9)
2479 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2481 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2483 /* Wa_2006604312:icl,ehl */
2484 if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2490 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2491 const struct intel_crtc_state *new_crtc_state)
2493 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2494 new_crtc_state->active_planes;
2497 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2498 const struct intel_crtc_state *new_crtc_state)
2500 return old_crtc_state->active_planes &&
2501 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2504 static void intel_post_plane_update(struct intel_atomic_state *state,
2505 struct intel_crtc *crtc)
2507 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2508 const struct intel_crtc_state *old_crtc_state =
2509 intel_atomic_get_old_crtc_state(state, crtc);
2510 const struct intel_crtc_state *new_crtc_state =
2511 intel_atomic_get_new_crtc_state(state, crtc);
2512 enum pipe pipe = crtc->pipe;
2514 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
2516 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
2517 intel_update_watermarks(crtc);
2519 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
2520 hsw_enable_ips(new_crtc_state);
2522 intel_fbc_post_update(state, crtc);
2524 if (needs_nv12_wa(old_crtc_state) &&
2525 !needs_nv12_wa(new_crtc_state))
2526 skl_wa_827(dev_priv, pipe, false);
2528 if (needs_scalerclk_wa(old_crtc_state) &&
2529 !needs_scalerclk_wa(new_crtc_state))
2530 icl_wa_scalerclkgating(dev_priv, pipe, false);
2533 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2534 struct intel_crtc *crtc)
2536 const struct intel_crtc_state *crtc_state =
2537 intel_atomic_get_new_crtc_state(state, crtc);
2538 u8 update_planes = crtc_state->update_planes;
2539 const struct intel_plane_state *plane_state;
2540 struct intel_plane *plane;
2543 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2544 if (plane->enable_flip_done &&
2545 plane->pipe == crtc->pipe &&
2546 update_planes & BIT(plane->id))
2547 plane->enable_flip_done(plane);
2551 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2552 struct intel_crtc *crtc)
2554 const struct intel_crtc_state *crtc_state =
2555 intel_atomic_get_new_crtc_state(state, crtc);
2556 u8 update_planes = crtc_state->update_planes;
2557 const struct intel_plane_state *plane_state;
2558 struct intel_plane *plane;
2561 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2562 if (plane->disable_flip_done &&
2563 plane->pipe == crtc->pipe &&
2564 update_planes & BIT(plane->id))
2565 plane->disable_flip_done(plane);
2569 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2570 struct intel_crtc *crtc)
2572 struct drm_i915_private *i915 = to_i915(state->base.dev);
2573 const struct intel_crtc_state *old_crtc_state =
2574 intel_atomic_get_old_crtc_state(state, crtc);
2575 const struct intel_crtc_state *new_crtc_state =
2576 intel_atomic_get_new_crtc_state(state, crtc);
2577 u8 update_planes = new_crtc_state->update_planes;
2578 const struct intel_plane_state *old_plane_state;
2579 struct intel_plane *plane;
2580 bool need_vbl_wait = false;
2583 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2584 if (plane->need_async_flip_disable_wa &&
2585 plane->pipe == crtc->pipe &&
2586 update_planes & BIT(plane->id)) {
2588 * Apart from the async flip bit we want to
2589 * preserve the old state for the plane.
2591 plane->async_flip(plane, old_crtc_state,
2592 old_plane_state, false);
2593 need_vbl_wait = true;
2598 intel_wait_for_vblank(i915, crtc->pipe);
2601 static void intel_pre_plane_update(struct intel_atomic_state *state,
2602 struct intel_crtc *crtc)
2604 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2605 const struct intel_crtc_state *old_crtc_state =
2606 intel_atomic_get_old_crtc_state(state, crtc);
2607 const struct intel_crtc_state *new_crtc_state =
2608 intel_atomic_get_new_crtc_state(state, crtc);
2609 enum pipe pipe = crtc->pipe;
2611 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
2612 hsw_disable_ips(old_crtc_state);
2614 if (intel_fbc_pre_update(state, crtc))
2615 intel_wait_for_vblank(dev_priv, pipe);
2617 /* Display WA 827 */
2618 if (!needs_nv12_wa(old_crtc_state) &&
2619 needs_nv12_wa(new_crtc_state))
2620 skl_wa_827(dev_priv, pipe, true);
2622 /* Wa_2006604312:icl,ehl */
2623 if (!needs_scalerclk_wa(old_crtc_state) &&
2624 needs_scalerclk_wa(new_crtc_state))
2625 icl_wa_scalerclkgating(dev_priv, pipe, true);
2628 * Vblank time updates from the shadow to live plane control register
2629 * are blocked if the memory self-refresh mode is active at that
2630 * moment. So to make sure the plane gets truly disabled, disable
2631 * first the self-refresh mode. The self-refresh enable bit in turn
2632 * will be checked/applied by the HW only at the next frame start
2633 * event which is after the vblank start event, so we need to have a
2634 * wait-for-vblank between disabling the plane and the pipe.
2636 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
2637 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
2638 intel_wait_for_vblank(dev_priv, pipe);
2641 * IVB workaround: must disable low power watermarks for at least
2642 * one frame before enabling scaling. LP watermarks can be re-enabled
2643 * when scaling is disabled.
2645 * WaCxSRDisabledForSpriteScaling:ivb
2647 if (old_crtc_state->hw.active &&
2648 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
2649 intel_wait_for_vblank(dev_priv, pipe);
2652 * If we're doing a modeset we don't need to do any
2653 * pre-vblank watermark programming here.
2655 if (!intel_crtc_needs_modeset(new_crtc_state)) {
2657 * For platforms that support atomic watermarks, program the
2658 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
2659 * will be the intermediate values that are safe for both pre- and
2660 * post- vblank; when vblank happens, the 'active' values will be set
2661 * to the final 'target' values and we'll do this again to get the
2662 * optimal watermarks. For gen9+ platforms, the values we program here
2663 * will be the final target values which will get automatically latched
2664 * at vblank time; no further programming will be necessary.
2666 * If a platform hasn't been transitioned to atomic watermarks yet,
2667 * we'll continue to update watermarks the old way, if flags tell
2670 if (dev_priv->display.initial_watermarks)
2671 dev_priv->display.initial_watermarks(state, crtc);
2672 else if (new_crtc_state->update_wm_pre)
2673 intel_update_watermarks(crtc);
2677 * Gen2 reports pipe underruns whenever all planes are disabled.
2678 * So disable underrun reporting before all the planes get disabled.
2680 * We do this after .initial_watermarks() so that we have a
2681 * chance of catching underruns with the intermediate watermarks
2682 * vs. the old plane configuration.
2684 if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
2685 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2688 * WA for platforms where async address update enable bit
2689 * is double buffered and only latched at start of vblank.
2691 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
2692 intel_crtc_async_flip_disable_wa(state, crtc);
2695 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2696 struct intel_crtc *crtc)
2698 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2699 const struct intel_crtc_state *new_crtc_state =
2700 intel_atomic_get_new_crtc_state(state, crtc);
2701 unsigned int update_mask = new_crtc_state->update_planes;
2702 const struct intel_plane_state *old_plane_state;
2703 struct intel_plane *plane;
2704 unsigned fb_bits = 0;
2707 intel_crtc_dpms_overlay_disable(crtc);
2709 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2710 if (crtc->pipe != plane->pipe ||
2711 !(update_mask & BIT(plane->id)))
2714 intel_disable_plane(plane, new_crtc_state);
2716 if (old_plane_state->uapi.visible)
2717 fb_bits |= plane->frontbuffer_bit;
2720 intel_frontbuffer_flip(dev_priv, fb_bits);
2724 * intel_connector_primary_encoder - get the primary encoder for a connector
2725 * @connector: connector for which to return the encoder
2727 * Returns the primary encoder for a connector. There is a 1:1 mapping from
2728 * all connectors to their encoder, except for DP-MST connectors which have
2729 * both a virtual and a primary encoder. These DP-MST primary encoders can be
2730 * pointed to by as many DP-MST connectors as there are pipes.
2732 static struct intel_encoder *
2733 intel_connector_primary_encoder(struct intel_connector *connector)
2735 struct intel_encoder *encoder;
2737 if (connector->mst_port)
2738 return &dp_to_dig_port(connector->mst_port)->base;
2740 encoder = intel_attached_encoder(connector);
2741 drm_WARN_ON(connector->base.dev, !encoder);
2746 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2748 struct drm_connector_state *new_conn_state;
2749 struct drm_connector *connector;
2752 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2754 struct intel_connector *intel_connector;
2755 struct intel_encoder *encoder;
2756 struct intel_crtc *crtc;
2758 if (!intel_connector_needs_modeset(state, connector))
2761 intel_connector = to_intel_connector(connector);
2762 encoder = intel_connector_primary_encoder(intel_connector);
2763 if (!encoder->update_prepare)
2766 crtc = new_conn_state->crtc ?
2767 to_intel_crtc(new_conn_state->crtc) : NULL;
2768 encoder->update_prepare(state, encoder, crtc);
2772 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2774 struct drm_connector_state *new_conn_state;
2775 struct drm_connector *connector;
2778 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2780 struct intel_connector *intel_connector;
2781 struct intel_encoder *encoder;
2782 struct intel_crtc *crtc;
2784 if (!intel_connector_needs_modeset(state, connector))
2787 intel_connector = to_intel_connector(connector);
2788 encoder = intel_connector_primary_encoder(intel_connector);
2789 if (!encoder->update_complete)
2792 crtc = new_conn_state->crtc ?
2793 to_intel_crtc(new_conn_state->crtc) : NULL;
2794 encoder->update_complete(state, encoder, crtc);
2798 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2799 struct intel_crtc *crtc)
2801 const struct intel_crtc_state *crtc_state =
2802 intel_atomic_get_new_crtc_state(state, crtc);
2803 const struct drm_connector_state *conn_state;
2804 struct drm_connector *conn;
2807 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2808 struct intel_encoder *encoder =
2809 to_intel_encoder(conn_state->best_encoder);
2811 if (conn_state->crtc != &crtc->base)
2814 if (encoder->pre_pll_enable)
2815 encoder->pre_pll_enable(state, encoder,
2816 crtc_state, conn_state);
2820 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2821 struct intel_crtc *crtc)
2823 const struct intel_crtc_state *crtc_state =
2824 intel_atomic_get_new_crtc_state(state, crtc);
2825 const struct drm_connector_state *conn_state;
2826 struct drm_connector *conn;
2829 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2830 struct intel_encoder *encoder =
2831 to_intel_encoder(conn_state->best_encoder);
2833 if (conn_state->crtc != &crtc->base)
2836 if (encoder->pre_enable)
2837 encoder->pre_enable(state, encoder,
2838 crtc_state, conn_state);
2842 static void intel_encoders_enable(struct intel_atomic_state *state,
2843 struct intel_crtc *crtc)
2845 const struct intel_crtc_state *crtc_state =
2846 intel_atomic_get_new_crtc_state(state, crtc);
2847 const struct drm_connector_state *conn_state;
2848 struct drm_connector *conn;
2851 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2852 struct intel_encoder *encoder =
2853 to_intel_encoder(conn_state->best_encoder);
2855 if (conn_state->crtc != &crtc->base)
2858 if (encoder->enable)
2859 encoder->enable(state, encoder,
2860 crtc_state, conn_state);
2861 intel_opregion_notify_encoder(encoder, true);
2865 static void intel_encoders_disable(struct intel_atomic_state *state,
2866 struct intel_crtc *crtc)
2868 const struct intel_crtc_state *old_crtc_state =
2869 intel_atomic_get_old_crtc_state(state, crtc);
2870 const struct drm_connector_state *old_conn_state;
2871 struct drm_connector *conn;
2874 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2875 struct intel_encoder *encoder =
2876 to_intel_encoder(old_conn_state->best_encoder);
2878 if (old_conn_state->crtc != &crtc->base)
2881 intel_opregion_notify_encoder(encoder, false);
2882 if (encoder->disable)
2883 encoder->disable(state, encoder,
2884 old_crtc_state, old_conn_state);
2888 static void intel_encoders_post_disable(struct intel_atomic_state *state,
2889 struct intel_crtc *crtc)
2891 const struct intel_crtc_state *old_crtc_state =
2892 intel_atomic_get_old_crtc_state(state, crtc);
2893 const struct drm_connector_state *old_conn_state;
2894 struct drm_connector *conn;
2897 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2898 struct intel_encoder *encoder =
2899 to_intel_encoder(old_conn_state->best_encoder);
2901 if (old_conn_state->crtc != &crtc->base)
2904 if (encoder->post_disable)
2905 encoder->post_disable(state, encoder,
2906 old_crtc_state, old_conn_state);
2910 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
2911 struct intel_crtc *crtc)
2913 const struct intel_crtc_state *old_crtc_state =
2914 intel_atomic_get_old_crtc_state(state, crtc);
2915 const struct drm_connector_state *old_conn_state;
2916 struct drm_connector *conn;
2919 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2920 struct intel_encoder *encoder =
2921 to_intel_encoder(old_conn_state->best_encoder);
2923 if (old_conn_state->crtc != &crtc->base)
2926 if (encoder->post_pll_disable)
2927 encoder->post_pll_disable(state, encoder,
2928 old_crtc_state, old_conn_state);
2932 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
2933 struct intel_crtc *crtc)
2935 const struct intel_crtc_state *crtc_state =
2936 intel_atomic_get_new_crtc_state(state, crtc);
2937 const struct drm_connector_state *conn_state;
2938 struct drm_connector *conn;
2941 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2942 struct intel_encoder *encoder =
2943 to_intel_encoder(conn_state->best_encoder);
2945 if (conn_state->crtc != &crtc->base)
2948 if (encoder->update_pipe)
2949 encoder->update_pipe(state, encoder,
2950 crtc_state, conn_state);
2954 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
2956 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2957 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2959 plane->disable_plane(plane, crtc_state);
2962 static void ilk_crtc_enable(struct intel_atomic_state *state,
2963 struct intel_crtc *crtc)
2965 const struct intel_crtc_state *new_crtc_state =
2966 intel_atomic_get_new_crtc_state(state, crtc);
2967 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2968 enum pipe pipe = crtc->pipe;
2970 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2974 * Sometimes spurious CPU pipe underruns happen during FDI
2975 * training, at least with VGA+HDMI cloning. Suppress them.
2977 * On ILK we get an occasional spurious CPU pipe underruns
2978 * between eDP port A enable and vdd enable. Also PCH port
2979 * enable seems to result in the occasional CPU pipe underrun.
2981 * Spurious PCH underruns also occur during PCH enabling.
2983 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2984 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2986 if (new_crtc_state->has_pch_encoder)
2987 intel_prepare_shared_dpll(new_crtc_state);
2989 if (intel_crtc_has_dp_encoder(new_crtc_state))
2990 intel_dp_set_m_n(new_crtc_state, M1_N1);
2992 intel_set_transcoder_timings(new_crtc_state);
2993 intel_set_pipe_src_size(new_crtc_state);
2995 if (new_crtc_state->has_pch_encoder)
2996 intel_cpu_transcoder_set_m_n(new_crtc_state,
2997 &new_crtc_state->fdi_m_n, NULL);
2999 ilk_set_pipeconf(new_crtc_state);
3001 crtc->active = true;
3003 intel_encoders_pre_enable(state, crtc);
3005 if (new_crtc_state->has_pch_encoder) {
3006 /* Note: FDI PLL enabling _must_ be done before we enable the
3007 * cpu pipes, hence this is separate from all the other fdi/pch
3009 ilk_fdi_pll_enable(new_crtc_state);
3011 assert_fdi_tx_disabled(dev_priv, pipe);
3012 assert_fdi_rx_disabled(dev_priv, pipe);
3015 ilk_pfit_enable(new_crtc_state);
3018 * On ILK+ LUT must be loaded before the pipe is running but with
3021 intel_color_load_luts(new_crtc_state);
3022 intel_color_commit(new_crtc_state);
3023 /* update DSPCNTR to configure gamma for pipe bottom color */
3024 intel_disable_primary_plane(new_crtc_state);
3026 if (dev_priv->display.initial_watermarks)
3027 dev_priv->display.initial_watermarks(state, crtc);
3028 intel_enable_pipe(new_crtc_state);
3030 if (new_crtc_state->has_pch_encoder)
3031 ilk_pch_enable(state, new_crtc_state);
3033 intel_crtc_vblank_on(new_crtc_state);
3035 intel_encoders_enable(state, crtc);
3037 if (HAS_PCH_CPT(dev_priv))
3038 cpt_verify_modeset(dev_priv, pipe);
3041 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3042 * And a second vblank wait is needed at least on ILK with
3043 * some interlaced HDMI modes. Let's do the double wait always
3044 * in case there are more corner cases we don't know about.
3046 if (new_crtc_state->has_pch_encoder) {
3047 intel_wait_for_vblank(dev_priv, pipe);
3048 intel_wait_for_vblank(dev_priv, pipe);
3050 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3051 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3054 /* IPS only exists on ULT machines and is tied to pipe A. */
3055 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3057 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3060 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3061 enum pipe pipe, bool apply)
3063 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3064 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3071 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3074 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3076 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3077 enum pipe pipe = crtc->pipe;
3080 val = MBUS_DBOX_A_CREDIT(2);
3082 if (DISPLAY_VER(dev_priv) >= 12) {
3083 val |= MBUS_DBOX_BW_CREDIT(2);
3084 val |= MBUS_DBOX_B_CREDIT(12);
3086 val |= MBUS_DBOX_BW_CREDIT(1);
3087 val |= MBUS_DBOX_B_CREDIT(8);
3090 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3093 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3095 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3096 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3098 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3099 HSW_LINETIME(crtc_state->linetime) |
3100 HSW_IPS_LINETIME(crtc_state->ips_linetime));
3103 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3105 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3106 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3107 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3110 val = intel_de_read(dev_priv, reg);
3111 val &= ~HSW_FRAME_START_DELAY_MASK;
3112 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3113 intel_de_write(dev_priv, reg, val);
3116 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3117 const struct intel_crtc_state *crtc_state)
3119 struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3120 struct intel_crtc_state *master_crtc_state;
3121 struct drm_connector_state *conn_state;
3122 struct drm_connector *conn;
3123 struct intel_encoder *encoder = NULL;
3126 if (crtc_state->bigjoiner_slave)
3127 master = crtc_state->bigjoiner_linked_crtc;
3129 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
3131 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3132 if (conn_state->crtc != &master->base)
3135 encoder = to_intel_encoder(conn_state->best_encoder);
3139 if (!crtc_state->bigjoiner_slave) {
3140 /* need to enable VDSC, which we skipped in pre-enable */
3141 intel_dsc_enable(encoder, crtc_state);
3144 * Enable sequence steps 1-7 on bigjoiner master
3146 intel_encoders_pre_pll_enable(state, master);
3147 intel_enable_shared_dpll(master_crtc_state);
3148 intel_encoders_pre_enable(state, master);
3150 /* and DSC on slave */
3151 intel_dsc_enable(NULL, crtc_state);
3155 static void hsw_crtc_enable(struct intel_atomic_state *state,
3156 struct intel_crtc *crtc)
3158 const struct intel_crtc_state *new_crtc_state =
3159 intel_atomic_get_new_crtc_state(state, crtc);
3160 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3161 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3162 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3163 bool psl_clkgate_wa;
3165 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3168 if (!new_crtc_state->bigjoiner) {
3169 intel_encoders_pre_pll_enable(state, crtc);
3171 if (new_crtc_state->shared_dpll)
3172 intel_enable_shared_dpll(new_crtc_state);
3174 intel_encoders_pre_enable(state, crtc);
3176 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3179 intel_set_pipe_src_size(new_crtc_state);
3180 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3181 bdw_set_pipemisc(new_crtc_state);
3183 if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3184 intel_set_transcoder_timings(new_crtc_state);
3186 if (cpu_transcoder != TRANSCODER_EDP)
3187 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3188 new_crtc_state->pixel_multiplier - 1);
3190 if (new_crtc_state->has_pch_encoder)
3191 intel_cpu_transcoder_set_m_n(new_crtc_state,
3192 &new_crtc_state->fdi_m_n, NULL);
3194 hsw_set_frame_start_delay(new_crtc_state);
3197 if (!transcoder_is_dsi(cpu_transcoder))
3198 hsw_set_pipeconf(new_crtc_state);
3200 crtc->active = true;
3202 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
3203 psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
3204 new_crtc_state->pch_pfit.enabled;
3206 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
3208 if (DISPLAY_VER(dev_priv) >= 9)
3209 skl_pfit_enable(new_crtc_state);
3211 ilk_pfit_enable(new_crtc_state);
3214 * On ILK+ LUT must be loaded before the pipe is running but with
3217 intel_color_load_luts(new_crtc_state);
3218 intel_color_commit(new_crtc_state);
3219 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
3220 if (DISPLAY_VER(dev_priv) < 9)
3221 intel_disable_primary_plane(new_crtc_state);
3223 hsw_set_linetime_wm(new_crtc_state);
3225 if (DISPLAY_VER(dev_priv) >= 11)
3226 icl_set_pipe_chicken(crtc);
3228 if (dev_priv->display.initial_watermarks)
3229 dev_priv->display.initial_watermarks(state, crtc);
3231 if (DISPLAY_VER(dev_priv) >= 11)
3232 icl_pipe_mbus_enable(crtc);
3234 if (new_crtc_state->bigjoiner_slave)
3235 intel_crtc_vblank_on(new_crtc_state);
3237 intel_encoders_enable(state, crtc);
3239 if (psl_clkgate_wa) {
3240 intel_wait_for_vblank(dev_priv, pipe);
3241 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
3244 /* If we change the relative order between pipe/planes enabling, we need
3245 * to change the workaround. */
3246 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
3247 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
3248 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3249 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3253 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3255 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3256 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3257 enum pipe pipe = crtc->pipe;
3259 /* To avoid upsetting the power well on haswell only disable the pfit if
3260 * it's in use. The hw state code will make sure we get this right. */
3261 if (!old_crtc_state->pch_pfit.enabled)
3264 intel_de_write(dev_priv, PF_CTL(pipe), 0);
3265 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3266 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3269 static void ilk_crtc_disable(struct intel_atomic_state *state,
3270 struct intel_crtc *crtc)
3272 const struct intel_crtc_state *old_crtc_state =
3273 intel_atomic_get_old_crtc_state(state, crtc);
3274 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3275 enum pipe pipe = crtc->pipe;
3278 * Sometimes spurious CPU pipe underruns happen when the
3279 * pipe is already disabled, but FDI RX/TX is still enabled.
3280 * Happens at least with VGA+HDMI cloning. Suppress them.
3282 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3283 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3285 intel_encoders_disable(state, crtc);
3287 intel_crtc_vblank_off(old_crtc_state);
3289 intel_disable_pipe(old_crtc_state);
3291 ilk_pfit_disable(old_crtc_state);
3293 if (old_crtc_state->has_pch_encoder)
3294 ilk_fdi_disable(crtc);
3296 intel_encoders_post_disable(state, crtc);
3298 if (old_crtc_state->has_pch_encoder) {
3299 ilk_disable_pch_transcoder(dev_priv, pipe);
3301 if (HAS_PCH_CPT(dev_priv)) {
3305 /* disable TRANS_DP_CTL */
3306 reg = TRANS_DP_CTL(pipe);
3307 temp = intel_de_read(dev_priv, reg);
3308 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3309 TRANS_DP_PORT_SEL_MASK);
3310 temp |= TRANS_DP_PORT_SEL_NONE;
3311 intel_de_write(dev_priv, reg, temp);
3313 /* disable DPLL_SEL */
3314 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3315 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3316 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3319 ilk_fdi_pll_disable(crtc);
3322 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3323 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3326 static void hsw_crtc_disable(struct intel_atomic_state *state,
3327 struct intel_crtc *crtc)
3330 * FIXME collapse everything to one hook.
3331 * Need care with mst->ddi interactions.
3333 intel_encoders_disable(state, crtc);
3334 intel_encoders_post_disable(state, crtc);
3337 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
3339 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3340 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3342 if (!crtc_state->gmch_pfit.control)
3346 * The panel fitter should only be adjusted whilst the pipe is disabled,
3347 * according to register description and PRM.
3349 drm_WARN_ON(&dev_priv->drm,
3350 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
3351 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
3353 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
3354 crtc_state->gmch_pfit.pgm_ratios);
3355 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
3357 /* Border color in case we don't scale up to the full screen. Black by
3358 * default, change to something else for debugging. */
3359 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
3362 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3364 if (phy == PHY_NONE)
3366 else if (IS_ALDERLAKE_S(dev_priv))
3367 return phy <= PHY_E;
3368 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3369 return phy <= PHY_D;
3370 else if (IS_JSL_EHL(dev_priv))
3371 return phy <= PHY_C;
3372 else if (DISPLAY_VER(dev_priv) >= 11)
3373 return phy <= PHY_B;
3378 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3380 if (IS_TIGERLAKE(dev_priv))
3381 return phy >= PHY_D && phy <= PHY_I;
3382 else if (IS_ICELAKE(dev_priv))
3383 return phy >= PHY_C && phy <= PHY_F;
3388 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3390 if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3391 return PHY_B + port - PORT_TC1;
3392 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3393 return PHY_C + port - PORT_TC1;
3394 else if (IS_JSL_EHL(i915) && port == PORT_D)
3397 return PHY_A + port - PORT_A;
3400 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3402 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3403 return TC_PORT_NONE;
3405 if (DISPLAY_VER(dev_priv) >= 12)
3406 return TC_PORT_1 + port - PORT_TC1;
3408 return TC_PORT_1 + port - PORT_C;
3411 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
3415 return POWER_DOMAIN_PORT_DDI_A_LANES;
3417 return POWER_DOMAIN_PORT_DDI_B_LANES;
3419 return POWER_DOMAIN_PORT_DDI_C_LANES;
3421 return POWER_DOMAIN_PORT_DDI_D_LANES;
3423 return POWER_DOMAIN_PORT_DDI_E_LANES;
3425 return POWER_DOMAIN_PORT_DDI_F_LANES;
3427 return POWER_DOMAIN_PORT_DDI_G_LANES;
3429 return POWER_DOMAIN_PORT_DDI_H_LANES;
3431 return POWER_DOMAIN_PORT_DDI_I_LANES;
3434 return POWER_DOMAIN_PORT_OTHER;
3438 enum intel_display_power_domain
3439 intel_aux_power_domain(struct intel_digital_port *dig_port)
3441 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3442 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
3444 if (intel_phy_is_tc(dev_priv, phy) &&
3445 dig_port->tc_mode == TC_PORT_TBT_ALT) {
3446 switch (dig_port->aux_ch) {
3448 return POWER_DOMAIN_AUX_C_TBT;
3450 return POWER_DOMAIN_AUX_D_TBT;
3452 return POWER_DOMAIN_AUX_E_TBT;
3454 return POWER_DOMAIN_AUX_F_TBT;
3456 return POWER_DOMAIN_AUX_G_TBT;
3458 return POWER_DOMAIN_AUX_H_TBT;
3460 return POWER_DOMAIN_AUX_I_TBT;
3462 MISSING_CASE(dig_port->aux_ch);
3463 return POWER_DOMAIN_AUX_C_TBT;
3467 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
3471 * Converts aux_ch to power_domain without caring about TBT ports for that use
3472 * intel_aux_power_domain()
3474 enum intel_display_power_domain
3475 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3479 return POWER_DOMAIN_AUX_A;
3481 return POWER_DOMAIN_AUX_B;
3483 return POWER_DOMAIN_AUX_C;
3485 return POWER_DOMAIN_AUX_D;
3487 return POWER_DOMAIN_AUX_E;
3489 return POWER_DOMAIN_AUX_F;
3491 return POWER_DOMAIN_AUX_G;
3493 return POWER_DOMAIN_AUX_H;
3495 return POWER_DOMAIN_AUX_I;
3497 MISSING_CASE(aux_ch);
3498 return POWER_DOMAIN_AUX_A;
3502 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3504 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3505 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3506 struct drm_encoder *encoder;
3507 enum pipe pipe = crtc->pipe;
3509 enum transcoder transcoder = crtc_state->cpu_transcoder;
3511 if (!crtc_state->hw.active)
3514 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3515 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3516 if (crtc_state->pch_pfit.enabled ||
3517 crtc_state->pch_pfit.force_thru)
3518 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3520 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3521 crtc_state->uapi.encoder_mask) {
3522 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3524 mask |= BIT_ULL(intel_encoder->power_domain);
3527 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3528 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
3530 if (crtc_state->shared_dpll)
3531 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3533 if (crtc_state->dsc.compression_enable)
3534 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3540 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3542 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3543 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3544 enum intel_display_power_domain domain;
3545 u64 domains, new_domains, old_domains;
3547 domains = get_crtc_power_domains(crtc_state);
3549 new_domains = domains & ~crtc->enabled_power_domains.mask;
3550 old_domains = crtc->enabled_power_domains.mask & ~domains;
3552 for_each_power_domain(domain, new_domains)
3553 intel_display_power_get_in_set(dev_priv,
3554 &crtc->enabled_power_domains,
3560 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3563 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3564 &crtc->enabled_power_domains,
3568 static void valleyview_crtc_enable(struct intel_atomic_state *state,
3569 struct intel_crtc *crtc)
3571 const struct intel_crtc_state *new_crtc_state =
3572 intel_atomic_get_new_crtc_state(state, crtc);
3573 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3574 enum pipe pipe = crtc->pipe;
3576 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3579 if (intel_crtc_has_dp_encoder(new_crtc_state))
3580 intel_dp_set_m_n(new_crtc_state, M1_N1);
3582 intel_set_transcoder_timings(new_crtc_state);
3583 intel_set_pipe_src_size(new_crtc_state);
3585 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
3586 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
3587 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
3590 i9xx_set_pipeconf(new_crtc_state);
3592 crtc->active = true;
3594 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3596 intel_encoders_pre_pll_enable(state, crtc);
3598 if (IS_CHERRYVIEW(dev_priv)) {
3599 chv_prepare_pll(crtc, new_crtc_state);
3600 chv_enable_pll(crtc, new_crtc_state);
3602 vlv_prepare_pll(crtc, new_crtc_state);
3603 vlv_enable_pll(crtc, new_crtc_state);
3606 intel_encoders_pre_enable(state, crtc);
3608 i9xx_pfit_enable(new_crtc_state);
3610 intel_color_load_luts(new_crtc_state);
3611 intel_color_commit(new_crtc_state);
3612 /* update DSPCNTR to configure gamma for pipe bottom color */
3613 intel_disable_primary_plane(new_crtc_state);
3615 dev_priv->display.initial_watermarks(state, crtc);
3616 intel_enable_pipe(new_crtc_state);
3618 intel_crtc_vblank_on(new_crtc_state);
3620 intel_encoders_enable(state, crtc);
3623 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
3625 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3626 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3628 intel_de_write(dev_priv, FP0(crtc->pipe),
3629 crtc_state->dpll_hw_state.fp0);
3630 intel_de_write(dev_priv, FP1(crtc->pipe),
3631 crtc_state->dpll_hw_state.fp1);
3634 static void i9xx_crtc_enable(struct intel_atomic_state *state,
3635 struct intel_crtc *crtc)
3637 const struct intel_crtc_state *new_crtc_state =
3638 intel_atomic_get_new_crtc_state(state, crtc);
3639 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3640 enum pipe pipe = crtc->pipe;
3642 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3645 i9xx_set_pll_dividers(new_crtc_state);
3647 if (intel_crtc_has_dp_encoder(new_crtc_state))
3648 intel_dp_set_m_n(new_crtc_state, M1_N1);
3650 intel_set_transcoder_timings(new_crtc_state);
3651 intel_set_pipe_src_size(new_crtc_state);
3653 i9xx_set_pipeconf(new_crtc_state);
3655 crtc->active = true;
3657 if (DISPLAY_VER(dev_priv) != 2)
3658 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3660 intel_encoders_pre_enable(state, crtc);
3662 i9xx_enable_pll(crtc, new_crtc_state);
3664 i9xx_pfit_enable(new_crtc_state);
3666 intel_color_load_luts(new_crtc_state);
3667 intel_color_commit(new_crtc_state);
3668 /* update DSPCNTR to configure gamma for pipe bottom color */
3669 intel_disable_primary_plane(new_crtc_state);
3671 if (dev_priv->display.initial_watermarks)
3672 dev_priv->display.initial_watermarks(state, crtc);
3674 intel_update_watermarks(crtc);
3675 intel_enable_pipe(new_crtc_state);
3677 intel_crtc_vblank_on(new_crtc_state);
3679 intel_encoders_enable(state, crtc);
3681 /* prevents spurious underruns */
3682 if (DISPLAY_VER(dev_priv) == 2)
3683 intel_wait_for_vblank(dev_priv, pipe);
3686 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3688 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3689 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3691 if (!old_crtc_state->gmch_pfit.control)
3694 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3696 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
3697 intel_de_read(dev_priv, PFIT_CONTROL));
3698 intel_de_write(dev_priv, PFIT_CONTROL, 0);
3701 static void i9xx_crtc_disable(struct intel_atomic_state *state,
3702 struct intel_crtc *crtc)
3704 struct intel_crtc_state *old_crtc_state =
3705 intel_atomic_get_old_crtc_state(state, crtc);
3706 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3707 enum pipe pipe = crtc->pipe;
3710 * On gen2 planes are double buffered but the pipe isn't, so we must
3711 * wait for planes to fully turn off before disabling the pipe.
3713 if (DISPLAY_VER(dev_priv) == 2)
3714 intel_wait_for_vblank(dev_priv, pipe);
3716 intel_encoders_disable(state, crtc);
3718 intel_crtc_vblank_off(old_crtc_state);
3720 intel_disable_pipe(old_crtc_state);
3722 i9xx_pfit_disable(old_crtc_state);
3724 intel_encoders_post_disable(state, crtc);
3726 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
3727 if (IS_CHERRYVIEW(dev_priv))
3728 chv_disable_pll(dev_priv, pipe);
3729 else if (IS_VALLEYVIEW(dev_priv))
3730 vlv_disable_pll(dev_priv, pipe);
3732 i9xx_disable_pll(old_crtc_state);
3735 intel_encoders_post_pll_disable(state, crtc);
3737 if (DISPLAY_VER(dev_priv) != 2)
3738 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3740 if (!dev_priv->display.initial_watermarks)
3741 intel_update_watermarks(crtc);
3743 /* clock the pipe down to 640x480@60 to potentially save power */
3744 if (IS_I830(dev_priv))
3745 i830_enable_pipe(dev_priv, pipe);
3748 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
3749 struct drm_modeset_acquire_ctx *ctx)
3751 struct intel_encoder *encoder;
3752 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3753 struct intel_bw_state *bw_state =
3754 to_intel_bw_state(dev_priv->bw_obj.state);
3755 struct intel_cdclk_state *cdclk_state =
3756 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
3757 struct intel_dbuf_state *dbuf_state =
3758 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
3759 struct intel_crtc_state *crtc_state =
3760 to_intel_crtc_state(crtc->base.state);
3761 struct intel_plane *plane;
3762 struct drm_atomic_state *state;
3763 struct intel_crtc_state *temp_crtc_state;
3764 enum pipe pipe = crtc->pipe;
3767 if (!crtc_state->hw.active)
3770 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
3771 const struct intel_plane_state *plane_state =
3772 to_intel_plane_state(plane->base.state);
3774 if (plane_state->uapi.visible)
3775 intel_plane_disable_noatomic(crtc, plane);
3778 state = drm_atomic_state_alloc(&dev_priv->drm);
3780 drm_dbg_kms(&dev_priv->drm,
3781 "failed to disable [CRTC:%d:%s], out of memory",
3782 crtc->base.base.id, crtc->base.name);
3786 state->acquire_ctx = ctx;
3788 /* Everything's already locked, -EDEADLK can't happen. */
3789 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
3790 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
3792 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
3794 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
3796 drm_atomic_state_put(state);
3798 drm_dbg_kms(&dev_priv->drm,
3799 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
3800 crtc->base.base.id, crtc->base.name);
3802 crtc->active = false;
3803 crtc->base.enabled = false;
3805 drm_WARN_ON(&dev_priv->drm,
3806 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
3807 crtc_state->uapi.active = false;
3808 crtc_state->uapi.connector_mask = 0;
3809 crtc_state->uapi.encoder_mask = 0;
3810 intel_crtc_free_hw_state(crtc_state);
3811 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
3813 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
3814 encoder->base.crtc = NULL;
3816 intel_fbc_disable(crtc);
3817 intel_update_watermarks(crtc);
3818 intel_disable_shared_dpll(crtc_state);
3820 intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
3822 dev_priv->active_pipes &= ~BIT(pipe);
3823 cdclk_state->min_cdclk[pipe] = 0;
3824 cdclk_state->min_voltage_level[pipe] = 0;
3825 cdclk_state->active_pipes &= ~BIT(pipe);
3827 dbuf_state->active_pipes &= ~BIT(pipe);
3829 bw_state->data_rate[pipe] = 0;
3830 bw_state->num_active_planes[pipe] = 0;
3834 * turn all crtc's off, but do not adjust state
3835 * This has to be paired with a call to intel_modeset_setup_hw_state.
3837 int intel_display_suspend(struct drm_device *dev)
3839 struct drm_i915_private *dev_priv = to_i915(dev);
3840 struct drm_atomic_state *state;
3843 state = drm_atomic_helper_suspend(dev);
3844 ret = PTR_ERR_OR_ZERO(state);
3846 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
3849 dev_priv->modeset_restore_state = state;
3853 void intel_encoder_destroy(struct drm_encoder *encoder)
3855 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3857 drm_encoder_cleanup(encoder);
3858 kfree(intel_encoder);
3861 /* Cross check the actual hw state with our own modeset state tracking (and it's
3862 * internal consistency). */
3863 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
3864 struct drm_connector_state *conn_state)
3866 struct intel_connector *connector = to_intel_connector(conn_state->connector);
3867 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3869 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
3870 connector->base.base.id, connector->base.name);
3872 if (connector->get_hw_state(connector)) {
3873 struct intel_encoder *encoder = intel_attached_encoder(connector);
3875 I915_STATE_WARN(!crtc_state,
3876 "connector enabled without attached crtc\n");
3881 I915_STATE_WARN(!crtc_state->hw.active,
3882 "connector is active, but attached crtc isn't\n");
3884 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
3887 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
3888 "atomic encoder doesn't match attached encoder\n");
3890 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
3891 "attached encoder crtc differs from connector crtc\n");
3893 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
3894 "attached crtc is active, but connector isn't\n");
3895 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
3896 "best encoder set without crtc!\n");
3900 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
3902 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3903 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3905 /* IPS only exists on ULT machines and is tied to pipe A. */
3906 if (!hsw_crtc_supports_ips(crtc))
3909 if (!dev_priv->params.enable_ips)
3912 if (crtc_state->pipe_bpp > 24)
3916 * We compare against max which means we must take
3917 * the increased cdclk requirement into account when
3918 * calculating the new cdclk.
3920 * Should measure whether using a lower cdclk w/o IPS
3922 if (IS_BROADWELL(dev_priv) &&
3923 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
3929 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
3931 struct drm_i915_private *dev_priv =
3932 to_i915(crtc_state->uapi.crtc->dev);
3933 struct intel_atomic_state *state =
3934 to_intel_atomic_state(crtc_state->uapi.state);
3936 crtc_state->ips_enabled = false;
3938 if (!hsw_crtc_state_ips_capable(crtc_state))
3942 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
3943 * enabled and disabled dynamically based on package C states,
3944 * user space can't make reliable use of the CRCs, so let's just
3945 * completely disable it.
3947 if (crtc_state->crc_enabled)
3950 /* IPS should be fine as long as at least one plane is enabled. */
3951 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
3954 if (IS_BROADWELL(dev_priv)) {
3955 const struct intel_cdclk_state *cdclk_state;
3957 cdclk_state = intel_atomic_get_cdclk_state(state);
3958 if (IS_ERR(cdclk_state))
3959 return PTR_ERR(cdclk_state);
3961 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
3962 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
3966 crtc_state->ips_enabled = true;
3971 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
3973 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3975 /* GDG double wide on either pipe, otherwise pipe A only */
3976 return DISPLAY_VER(dev_priv) < 4 &&
3977 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
3980 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
3982 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
3983 unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
3986 * We only use IF-ID interlacing. If we ever use
3987 * PF-ID we'll need to adjust the pixel_rate here.
3990 if (!crtc_state->pch_pfit.enabled)
3993 pipe_w = crtc_state->pipe_src_w;
3994 pipe_h = crtc_state->pipe_src_h;
3996 pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
3997 pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
3999 if (pipe_w < pfit_w)
4001 if (pipe_h < pfit_h)
4004 if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
4005 !pfit_w || !pfit_h))
4008 return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
4012 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4013 const struct drm_display_mode *timings)
4015 mode->hdisplay = timings->crtc_hdisplay;
4016 mode->htotal = timings->crtc_htotal;
4017 mode->hsync_start = timings->crtc_hsync_start;
4018 mode->hsync_end = timings->crtc_hsync_end;
4020 mode->vdisplay = timings->crtc_vdisplay;
4021 mode->vtotal = timings->crtc_vtotal;
4022 mode->vsync_start = timings->crtc_vsync_start;
4023 mode->vsync_end = timings->crtc_vsync_end;
4025 mode->flags = timings->flags;
4026 mode->type = DRM_MODE_TYPE_DRIVER;
4028 mode->clock = timings->crtc_clock;
4030 drm_mode_set_name(mode);
4033 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4035 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4037 if (HAS_GMCH(dev_priv))
4038 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
4039 crtc_state->pixel_rate =
4040 crtc_state->hw.pipe_mode.crtc_clock;
4042 crtc_state->pixel_rate =
4043 ilk_pipe_pixel_rate(crtc_state);
4046 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4048 struct drm_display_mode *mode = &crtc_state->hw.mode;
4049 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4050 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4052 drm_mode_copy(pipe_mode, adjusted_mode);
4054 if (crtc_state->bigjoiner) {
4056 * transcoder is programmed to the full mode,
4057 * but pipe timings are half of the transcoder mode
4059 pipe_mode->crtc_hdisplay /= 2;
4060 pipe_mode->crtc_hblank_start /= 2;
4061 pipe_mode->crtc_hblank_end /= 2;
4062 pipe_mode->crtc_hsync_start /= 2;
4063 pipe_mode->crtc_hsync_end /= 2;
4064 pipe_mode->crtc_htotal /= 2;
4065 pipe_mode->crtc_clock /= 2;
4068 if (crtc_state->splitter.enable) {
4069 int n = crtc_state->splitter.link_count;
4070 int overlap = crtc_state->splitter.pixel_overlap;
4073 * eDP MSO uses segment timings from EDID for transcoder
4074 * timings, but full mode for everything else.
4076 * h_full = (h_segment - pixel_overlap) * link_count
4078 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4079 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4080 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4081 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4082 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4083 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4084 pipe_mode->crtc_clock *= n;
4086 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4087 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
4089 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4090 intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4093 intel_crtc_compute_pixel_rate(crtc_state);
4095 drm_mode_copy(mode, adjusted_mode);
4096 mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4097 mode->vdisplay = crtc_state->pipe_src_h;
4100 static void intel_encoder_get_config(struct intel_encoder *encoder,
4101 struct intel_crtc_state *crtc_state)
4103 encoder->get_config(encoder, crtc_state);
4105 intel_crtc_readout_derived_state(crtc_state);
4108 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4109 struct intel_crtc_state *pipe_config)
4111 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4112 struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4113 int clock_limit = dev_priv->max_dotclk_freq;
4115 drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4117 /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
4118 if (pipe_config->bigjoiner) {
4119 pipe_mode->crtc_clock /= 2;
4120 pipe_mode->crtc_hdisplay /= 2;
4121 pipe_mode->crtc_hblank_start /= 2;
4122 pipe_mode->crtc_hblank_end /= 2;
4123 pipe_mode->crtc_hsync_start /= 2;
4124 pipe_mode->crtc_hsync_end /= 2;
4125 pipe_mode->crtc_htotal /= 2;
4126 pipe_config->pipe_src_w /= 2;
4129 if (pipe_config->splitter.enable) {
4130 int n = pipe_config->splitter.link_count;
4131 int overlap = pipe_config->splitter.pixel_overlap;
4133 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4134 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4135 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4136 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4137 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4138 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4139 pipe_mode->crtc_clock *= n;
4142 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4144 if (DISPLAY_VER(dev_priv) < 4) {
4145 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4148 * Enable double wide mode when the dot clock
4149 * is > 90% of the (display) core speed.
4151 if (intel_crtc_supports_double_wide(crtc) &&
4152 pipe_mode->crtc_clock > clock_limit) {
4153 clock_limit = dev_priv->max_dotclk_freq;
4154 pipe_config->double_wide = true;
4158 if (pipe_mode->crtc_clock > clock_limit) {
4159 drm_dbg_kms(&dev_priv->drm,
4160 "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4161 pipe_mode->crtc_clock, clock_limit,
4162 yesno(pipe_config->double_wide));
4167 * Pipe horizontal size must be even in:
4169 * - LVDS dual channel mode
4170 * - Double wide pipe
4172 if (pipe_config->pipe_src_w & 1) {
4173 if (pipe_config->double_wide) {
4174 drm_dbg_kms(&dev_priv->drm,
4175 "Odd pipe source width not supported with double wide pipe\n");
4179 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4180 intel_is_dual_link_lvds(dev_priv)) {
4181 drm_dbg_kms(&dev_priv->drm,
4182 "Odd pipe source width not supported with dual link LVDS\n");
4187 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4188 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4190 if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4191 pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4194 intel_crtc_compute_pixel_rate(pipe_config);
4196 if (pipe_config->has_pch_encoder)
4197 return ilk_fdi_compute_config(crtc, pipe_config);
4203 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4205 while (*num > DATA_LINK_M_N_MASK ||
4206 *den > DATA_LINK_M_N_MASK) {
4212 static void compute_m_n(unsigned int m, unsigned int n,
4213 u32 *ret_m, u32 *ret_n,
4217 * Several DP dongles in particular seem to be fussy about
4218 * too large link M/N values. Give N value as 0x8000 that
4219 * should be acceptable by specific devices. 0x8000 is the
4220 * specified fixed N value for asynchronous clock mode,
4221 * which the devices expect also in synchronous clock mode.
4224 *ret_n = DP_LINK_CONSTANT_N_VALUE;
4226 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4228 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4229 intel_reduce_m_n_ratio(ret_m, ret_n);
4233 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4234 int pixel_clock, int link_clock,
4235 struct intel_link_m_n *m_n,
4236 bool constant_n, bool fec_enable)
4238 u32 data_clock = bits_per_pixel * pixel_clock;
4241 data_clock = intel_dp_mode_to_fec_clock(data_clock);
4244 compute_m_n(data_clock,
4245 link_clock * nlanes * 8,
4246 &m_n->gmch_m, &m_n->gmch_n,
4249 compute_m_n(pixel_clock, link_clock,
4250 &m_n->link_m, &m_n->link_n,
4254 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4257 * There may be no VBT; and if the BIOS enabled SSC we can
4258 * just keep using it to avoid unnecessary flicker. Whereas if the
4259 * BIOS isn't using it, don't assume it will work even if the VBT
4260 * indicates as much.
4262 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4263 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4267 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4268 drm_dbg_kms(&dev_priv->drm,
4269 "SSC %s by BIOS, overriding VBT which says %s\n",
4270 enableddisabled(bios_lvds_use_ssc),
4271 enableddisabled(dev_priv->vbt.lvds_use_ssc));
4272 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4277 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4278 const struct intel_link_m_n *m_n)
4280 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4281 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4282 enum pipe pipe = crtc->pipe;
4284 intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4285 TU_SIZE(m_n->tu) | m_n->gmch_m);
4286 intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4287 intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4288 intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4291 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4292 enum transcoder transcoder)
4294 if (IS_HASWELL(dev_priv))
4295 return transcoder == TRANSCODER_EDP;
4298 * Strictly speaking some registers are available before
4299 * gen7, but we only support DRRS on gen7+
4301 return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4304 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4305 const struct intel_link_m_n *m_n,
4306 const struct intel_link_m_n *m2_n2)
4308 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4309 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4310 enum pipe pipe = crtc->pipe;
4311 enum transcoder transcoder = crtc_state->cpu_transcoder;
4313 if (DISPLAY_VER(dev_priv) >= 5) {
4314 intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
4315 TU_SIZE(m_n->tu) | m_n->gmch_m);
4316 intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
4318 intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
4320 intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
4323 * M2_N2 registers are set only if DRRS is supported
4324 * (to make sure the registers are not unnecessarily accessed).
4326 if (m2_n2 && crtc_state->has_drrs &&
4327 transcoder_has_m2_n2(dev_priv, transcoder)) {
4328 intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
4329 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
4330 intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
4332 intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
4334 intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
4338 intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
4339 TU_SIZE(m_n->tu) | m_n->gmch_m);
4340 intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4341 intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
4342 intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
4346 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
4348 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
4349 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4352 dp_m_n = &crtc_state->dp_m_n;
4353 dp_m2_n2 = &crtc_state->dp_m2_n2;
4354 } else if (m_n == M2_N2) {
4357 * M2_N2 registers are not supported. Hence m2_n2 divider value
4358 * needs to be programmed into M1_N1.
4360 dp_m_n = &crtc_state->dp_m2_n2;
4362 drm_err(&i915->drm, "Unsupported divider value\n");
4366 if (crtc_state->has_pch_encoder)
4367 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
4369 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
4372 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
4374 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4375 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4376 enum pipe pipe = crtc->pipe;
4377 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4378 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4379 u32 crtc_vtotal, crtc_vblank_end;
4382 /* We need to be careful not to changed the adjusted mode, for otherwise
4383 * the hw state checker will get angry at the mismatch. */
4384 crtc_vtotal = adjusted_mode->crtc_vtotal;
4385 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4387 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4388 /* the chip adds 2 halflines automatically */
4390 crtc_vblank_end -= 1;
4392 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4393 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
4395 vsyncshift = adjusted_mode->crtc_hsync_start -
4396 adjusted_mode->crtc_htotal / 2;
4398 vsyncshift += adjusted_mode->crtc_htotal;
4401 if (DISPLAY_VER(dev_priv) > 3)
4402 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
4405 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
4406 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
4407 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
4408 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
4409 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
4410 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
4412 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
4413 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
4414 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
4415 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
4416 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
4417 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
4419 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4420 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4421 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4423 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
4424 (pipe == PIPE_B || pipe == PIPE_C))
4425 intel_de_write(dev_priv, VTOTAL(pipe),
4426 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
4430 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4432 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4434 enum pipe pipe = crtc->pipe;
4436 /* pipesrc controls the size that is scaled from, which should
4437 * always be the user's requested size.
4439 intel_de_write(dev_priv, PIPESRC(pipe),
4440 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4443 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4445 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4446 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4448 if (DISPLAY_VER(dev_priv) == 2)
4451 if (DISPLAY_VER(dev_priv) >= 9 ||
4452 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4453 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4455 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4458 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
4459 struct intel_crtc_state *pipe_config)
4461 struct drm_device *dev = crtc->base.dev;
4462 struct drm_i915_private *dev_priv = to_i915(dev);
4463 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4466 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
4467 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4468 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4470 if (!transcoder_is_dsi(cpu_transcoder)) {
4471 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
4472 pipe_config->hw.adjusted_mode.crtc_hblank_start =
4474 pipe_config->hw.adjusted_mode.crtc_hblank_end =
4475 ((tmp >> 16) & 0xffff) + 1;
4477 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
4478 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4479 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4481 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
4482 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4483 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4485 if (!transcoder_is_dsi(cpu_transcoder)) {
4486 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
4487 pipe_config->hw.adjusted_mode.crtc_vblank_start =
4489 pipe_config->hw.adjusted_mode.crtc_vblank_end =
4490 ((tmp >> 16) & 0xffff) + 1;
4492 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
4493 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4494 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4496 if (intel_pipe_is_interlaced(pipe_config)) {
4497 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4498 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
4499 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
4503 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4504 struct intel_crtc_state *pipe_config)
4506 struct drm_device *dev = crtc->base.dev;
4507 struct drm_i915_private *dev_priv = to_i915(dev);
4510 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4511 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4512 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4515 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
4517 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4518 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4523 /* we keep both pipes enabled on 830 */
4524 if (IS_I830(dev_priv))
4525 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
4527 if (crtc_state->double_wide)
4528 pipeconf |= PIPECONF_DOUBLE_WIDE;
4530 /* only g4x and later have fancy bpc/dither controls */
4531 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4532 IS_CHERRYVIEW(dev_priv)) {
4533 /* Bspec claims that we can't use dithering for 30bpp pipes. */
4534 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
4535 pipeconf |= PIPECONF_DITHER_EN |
4536 PIPECONF_DITHER_TYPE_SP;
4538 switch (crtc_state->pipe_bpp) {
4540 pipeconf |= PIPECONF_6BPC;
4543 pipeconf |= PIPECONF_8BPC;
4546 pipeconf |= PIPECONF_10BPC;
4549 /* Case prevented by intel_choose_pipe_bpp_dither. */
4554 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
4555 if (DISPLAY_VER(dev_priv) < 4 ||
4556 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4557 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4559 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
4561 pipeconf |= PIPECONF_PROGRESSIVE;
4564 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4565 crtc_state->limited_color_range)
4566 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4568 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
4570 pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4572 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
4573 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
4576 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4578 if (IS_I830(dev_priv))
4581 return DISPLAY_VER(dev_priv) >= 4 ||
4582 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4585 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4587 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4588 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4591 if (!i9xx_has_pfit(dev_priv))
4594 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
4595 if (!(tmp & PFIT_ENABLE))
4598 /* Check whether the pfit is attached to our pipe. */
4599 if (DISPLAY_VER(dev_priv) < 4) {
4600 if (crtc->pipe != PIPE_B)
4603 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4607 crtc_state->gmch_pfit.control = tmp;
4608 crtc_state->gmch_pfit.pgm_ratios =
4609 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
4612 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
4613 struct intel_crtc_state *pipe_config)
4615 struct drm_device *dev = crtc->base.dev;
4616 struct drm_i915_private *dev_priv = to_i915(dev);
4617 enum pipe pipe = crtc->pipe;
4620 int refclk = 100000;
4622 /* In case of DSI, DPLL will not be used */
4623 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4626 vlv_dpio_get(dev_priv);
4627 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4628 vlv_dpio_put(dev_priv);
4630 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
4631 clock.m2 = mdiv & DPIO_M2DIV_MASK;
4632 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
4633 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
4634 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
4636 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4639 static void chv_crtc_clock_get(struct intel_crtc *crtc,
4640 struct intel_crtc_state *pipe_config)
4642 struct drm_device *dev = crtc->base.dev;
4643 struct drm_i915_private *dev_priv = to_i915(dev);
4644 enum pipe pipe = crtc->pipe;
4645 enum dpio_channel port = vlv_pipe_to_channel(pipe);
4647 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
4648 int refclk = 100000;
4650 /* In case of DSI, DPLL will not be used */
4651 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4654 vlv_dpio_get(dev_priv);
4655 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
4656 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
4657 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
4658 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
4659 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
4660 vlv_dpio_put(dev_priv);
4662 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
4663 clock.m2 = (pll_dw0 & 0xff) << 22;
4664 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
4665 clock.m2 |= pll_dw2 & 0x3fffff;
4666 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
4667 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
4668 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
4670 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
4673 static enum intel_output_format
4674 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4676 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4679 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4681 if (tmp & PIPEMISC_YUV420_ENABLE) {
4682 /* We support 4:2:0 in full blend mode only */
4683 drm_WARN_ON(&dev_priv->drm,
4684 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4686 return INTEL_OUTPUT_FORMAT_YCBCR420;
4687 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4688 return INTEL_OUTPUT_FORMAT_YCBCR444;
4690 return INTEL_OUTPUT_FORMAT_RGB;
4694 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4696 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4697 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4698 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4699 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4702 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4704 if (tmp & DISPPLANE_GAMMA_ENABLE)
4705 crtc_state->gamma_enable = true;
4707 if (!HAS_GMCH(dev_priv) &&
4708 tmp & DISPPLANE_PIPE_CSC_ENABLE)
4709 crtc_state->csc_enable = true;
4712 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4713 struct intel_crtc_state *pipe_config)
4715 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4716 enum intel_display_power_domain power_domain;
4717 intel_wakeref_t wakeref;
4721 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
4722 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4726 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4727 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4728 pipe_config->shared_dpll = NULL;
4732 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4733 if (!(tmp & PIPECONF_ENABLE))
4736 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4737 IS_CHERRYVIEW(dev_priv)) {
4738 switch (tmp & PIPECONF_BPC_MASK) {
4740 pipe_config->pipe_bpp = 18;
4743 pipe_config->pipe_bpp = 24;
4745 case PIPECONF_10BPC:
4746 pipe_config->pipe_bpp = 30;
4753 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4754 (tmp & PIPECONF_COLOR_RANGE_SELECT))
4755 pipe_config->limited_color_range = true;
4757 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
4758 PIPECONF_GAMMA_MODE_SHIFT;
4760 if (IS_CHERRYVIEW(dev_priv))
4761 pipe_config->cgm_mode = intel_de_read(dev_priv,
4762 CGM_PIPE_MODE(crtc->pipe));
4764 i9xx_get_pipe_color_config(pipe_config);
4765 intel_color_get_config(pipe_config);
4767 if (DISPLAY_VER(dev_priv) < 4)
4768 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
4770 intel_get_transcoder_timings(crtc, pipe_config);
4771 intel_get_pipe_src_size(crtc, pipe_config);
4773 i9xx_get_pfit_config(pipe_config);
4775 if (DISPLAY_VER(dev_priv) >= 4) {
4776 /* No way to read it out on pipes B and C */
4777 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
4778 tmp = dev_priv->chv_dpll_md[crtc->pipe];
4780 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
4781 pipe_config->pixel_multiplier =
4782 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
4783 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
4784 pipe_config->dpll_hw_state.dpll_md = tmp;
4785 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4786 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
4787 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
4788 pipe_config->pixel_multiplier =
4789 ((tmp & SDVO_MULTIPLIER_MASK)
4790 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
4792 /* Note that on i915G/GM the pixel multiplier is in the sdvo
4793 * port and will be fixed up in the encoder->get_config
4795 pipe_config->pixel_multiplier = 1;
4797 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
4799 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
4800 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
4802 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
4805 /* Mask out read-only status bits. */
4806 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
4807 DPLL_PORTC_READY_MASK |
4808 DPLL_PORTB_READY_MASK);
4811 if (IS_CHERRYVIEW(dev_priv))
4812 chv_crtc_clock_get(crtc, pipe_config);
4813 else if (IS_VALLEYVIEW(dev_priv))
4814 vlv_crtc_clock_get(crtc, pipe_config);
4816 i9xx_crtc_clock_get(crtc, pipe_config);
4819 * Normally the dotclock is filled in by the encoder .get_config()
4820 * but in case the pipe is enabled w/o any ports we need a sane
4823 pipe_config->hw.adjusted_mode.crtc_clock =
4824 pipe_config->port_clock / pipe_config->pixel_multiplier;
4829 intel_display_power_put(dev_priv, power_domain, wakeref);
4834 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
4836 struct intel_encoder *encoder;
4839 bool has_lvds = false;
4840 bool has_cpu_edp = false;
4841 bool has_panel = false;
4842 bool has_ck505 = false;
4843 bool can_ssc = false;
4844 bool using_ssc_source = false;
4846 /* We need to take the global config into account */
4847 for_each_intel_encoder(&dev_priv->drm, encoder) {
4848 switch (encoder->type) {
4849 case INTEL_OUTPUT_LVDS:
4853 case INTEL_OUTPUT_EDP:
4855 if (encoder->port == PORT_A)
4863 if (HAS_PCH_IBX(dev_priv)) {
4864 has_ck505 = dev_priv->vbt.display_clock_mode;
4865 can_ssc = has_ck505;
4871 /* Check if any DPLLs are using the SSC source */
4872 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
4873 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
4875 if (!(temp & DPLL_VCO_ENABLE))
4878 if ((temp & PLL_REF_INPUT_MASK) ==
4879 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
4880 using_ssc_source = true;
4885 drm_dbg_kms(&dev_priv->drm,
4886 "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
4887 has_panel, has_lvds, has_ck505, using_ssc_source);
4889 /* Ironlake: try to setup display ref clock before DPLL
4890 * enabling. This is only under driver's control after
4891 * PCH B stepping, previous chipset stepping should be
4892 * ignoring this setting.
4894 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
4896 /* As we must carefully and slowly disable/enable each source in turn,
4897 * compute the final state we want first and check if we need to
4898 * make any changes at all.
4901 final &= ~DREF_NONSPREAD_SOURCE_MASK;
4903 final |= DREF_NONSPREAD_CK505_ENABLE;
4905 final |= DREF_NONSPREAD_SOURCE_ENABLE;
4907 final &= ~DREF_SSC_SOURCE_MASK;
4908 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4909 final &= ~DREF_SSC1_ENABLE;
4912 final |= DREF_SSC_SOURCE_ENABLE;
4914 if (intel_panel_use_ssc(dev_priv) && can_ssc)
4915 final |= DREF_SSC1_ENABLE;
4918 if (intel_panel_use_ssc(dev_priv) && can_ssc)
4919 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4921 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4923 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4924 } else if (using_ssc_source) {
4925 final |= DREF_SSC_SOURCE_ENABLE;
4926 final |= DREF_SSC1_ENABLE;
4932 /* Always enable nonspread source */
4933 val &= ~DREF_NONSPREAD_SOURCE_MASK;
4936 val |= DREF_NONSPREAD_CK505_ENABLE;
4938 val |= DREF_NONSPREAD_SOURCE_ENABLE;
4941 val &= ~DREF_SSC_SOURCE_MASK;
4942 val |= DREF_SSC_SOURCE_ENABLE;
4944 /* SSC must be turned on before enabling the CPU output */
4945 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4946 drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
4947 val |= DREF_SSC1_ENABLE;
4949 val &= ~DREF_SSC1_ENABLE;
4951 /* Get SSC going before enabling the outputs */
4952 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4953 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
4956 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4958 /* Enable CPU source on CPU attached eDP */
4960 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4961 drm_dbg_kms(&dev_priv->drm,
4962 "Using SSC on eDP\n");
4963 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4965 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4967 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4969 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4970 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
4973 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
4975 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4977 /* Turn off CPU output */
4978 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4980 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4981 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
4984 if (!using_ssc_source) {
4985 drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
4987 /* Turn off the SSC source */
4988 val &= ~DREF_SSC_SOURCE_MASK;
4989 val |= DREF_SSC_SOURCE_DISABLE;
4992 val &= ~DREF_SSC1_ENABLE;
4994 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4995 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5000 BUG_ON(val != final);
5003 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5007 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5008 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5009 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5011 if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5012 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5013 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5015 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5016 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5017 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5019 if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5020 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5021 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5024 /* WaMPhyProgramming:hsw */
5025 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5029 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5030 tmp &= ~(0xFF << 24);
5031 tmp |= (0x12 << 24);
5032 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5034 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5036 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5038 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5040 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5042 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5043 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5044 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5046 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5047 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5048 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5050 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5053 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5055 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5058 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5060 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5063 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5065 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5068 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5070 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5071 tmp &= ~(0xFF << 16);
5072 tmp |= (0x1C << 16);
5073 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5075 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5076 tmp &= ~(0xFF << 16);
5077 tmp |= (0x1C << 16);
5078 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5080 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5082 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5084 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5086 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5088 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5089 tmp &= ~(0xF << 28);
5091 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5093 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5094 tmp &= ~(0xF << 28);
5096 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5099 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5100 * Programming" based on the parameters passed:
5101 * - Sequence to enable CLKOUT_DP
5102 * - Sequence to enable CLKOUT_DP without spread
5103 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5105 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5106 bool with_spread, bool with_fdi)
5110 if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5111 "FDI requires downspread\n"))
5113 if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5114 with_fdi, "LP PCH doesn't have FDI\n"))
5117 mutex_lock(&dev_priv->sb_lock);
5119 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5120 tmp &= ~SBI_SSCCTL_DISABLE;
5121 tmp |= SBI_SSCCTL_PATHALT;
5122 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5127 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5128 tmp &= ~SBI_SSCCTL_PATHALT;
5129 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5132 lpt_reset_fdi_mphy(dev_priv);
5133 lpt_program_fdi_mphy(dev_priv);
5137 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5138 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5139 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5140 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5142 mutex_unlock(&dev_priv->sb_lock);
5145 /* Sequence to disable CLKOUT_DP */
5146 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5150 mutex_lock(&dev_priv->sb_lock);
5152 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5153 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5154 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5155 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5157 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5158 if (!(tmp & SBI_SSCCTL_DISABLE)) {
5159 if (!(tmp & SBI_SSCCTL_PATHALT)) {
5160 tmp |= SBI_SSCCTL_PATHALT;
5161 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5164 tmp |= SBI_SSCCTL_DISABLE;
5165 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5168 mutex_unlock(&dev_priv->sb_lock);
5171 #define BEND_IDX(steps) ((50 + (steps)) / 5)
5173 static const u16 sscdivintphase[] = {
5174 [BEND_IDX( 50)] = 0x3B23,
5175 [BEND_IDX( 45)] = 0x3B23,
5176 [BEND_IDX( 40)] = 0x3C23,
5177 [BEND_IDX( 35)] = 0x3C23,
5178 [BEND_IDX( 30)] = 0x3D23,
5179 [BEND_IDX( 25)] = 0x3D23,
5180 [BEND_IDX( 20)] = 0x3E23,
5181 [BEND_IDX( 15)] = 0x3E23,
5182 [BEND_IDX( 10)] = 0x3F23,
5183 [BEND_IDX( 5)] = 0x3F23,
5184 [BEND_IDX( 0)] = 0x0025,
5185 [BEND_IDX( -5)] = 0x0025,
5186 [BEND_IDX(-10)] = 0x0125,
5187 [BEND_IDX(-15)] = 0x0125,
5188 [BEND_IDX(-20)] = 0x0225,
5189 [BEND_IDX(-25)] = 0x0225,
5190 [BEND_IDX(-30)] = 0x0325,
5191 [BEND_IDX(-35)] = 0x0325,
5192 [BEND_IDX(-40)] = 0x0425,
5193 [BEND_IDX(-45)] = 0x0425,
5194 [BEND_IDX(-50)] = 0x0525,
5199 * steps -50 to 50 inclusive, in steps of 5
5200 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5201 * change in clock period = -(steps / 10) * 5.787 ps
5203 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5206 int idx = BEND_IDX(steps);
5208 if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5211 if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5214 mutex_lock(&dev_priv->sb_lock);
5216 if (steps % 10 != 0)
5220 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5222 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5224 tmp |= sscdivintphase[idx];
5225 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5227 mutex_unlock(&dev_priv->sb_lock);
5232 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5234 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5235 u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5237 if ((ctl & SPLL_PLL_ENABLE) == 0)
5240 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5241 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5244 if (IS_BROADWELL(dev_priv) &&
5245 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5251 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5252 enum intel_dpll_id id)
5254 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5255 u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5257 if ((ctl & WRPLL_PLL_ENABLE) == 0)
5260 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5263 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5264 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5265 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5271 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5273 struct intel_encoder *encoder;
5274 bool has_fdi = false;
5276 for_each_intel_encoder(&dev_priv->drm, encoder) {
5277 switch (encoder->type) {
5278 case INTEL_OUTPUT_ANALOG:
5287 * The BIOS may have decided to use the PCH SSC
5288 * reference so we must not disable it until the
5289 * relevant PLLs have stopped relying on it. We'll
5290 * just leave the PCH SSC reference enabled in case
5291 * any active PLL is using it. It will get disabled
5292 * after runtime suspend if we don't have FDI.
5294 * TODO: Move the whole reference clock handling
5295 * to the modeset sequence proper so that we can
5296 * actually enable/disable/reconfigure these things
5297 * safely. To do that we need to introduce a real
5298 * clock hierarchy. That would also allow us to do
5299 * clock bending finally.
5301 dev_priv->pch_ssc_use = 0;
5303 if (spll_uses_pch_ssc(dev_priv)) {
5304 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5305 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5308 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5309 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5310 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5313 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5314 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5315 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5318 if (dev_priv->pch_ssc_use)
5322 lpt_bend_clkout_dp(dev_priv, 0);
5323 lpt_enable_clkout_dp(dev_priv, true, true);
5325 lpt_disable_clkout_dp(dev_priv);
5330 * Initialize reference clocks when the driver loads
5332 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5334 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5335 ilk_init_pch_refclk(dev_priv);
5336 else if (HAS_PCH_LPT(dev_priv))
5337 lpt_init_pch_refclk(dev_priv);
5340 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5342 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5343 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5344 enum pipe pipe = crtc->pipe;
5349 switch (crtc_state->pipe_bpp) {
5351 val |= PIPECONF_6BPC;
5354 val |= PIPECONF_8BPC;
5357 val |= PIPECONF_10BPC;
5360 val |= PIPECONF_12BPC;
5363 /* Case prevented by intel_choose_pipe_bpp_dither. */
5367 if (crtc_state->dither)
5368 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5370 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5371 val |= PIPECONF_INTERLACED_ILK;
5373 val |= PIPECONF_PROGRESSIVE;
5376 * This would end up with an odd purple hue over
5377 * the entire display. Make sure we don't do it.
5379 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5380 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5382 if (crtc_state->limited_color_range &&
5383 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5384 val |= PIPECONF_COLOR_RANGE_SELECT;
5386 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5387 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5389 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5391 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5393 intel_de_write(dev_priv, PIPECONF(pipe), val);
5394 intel_de_posting_read(dev_priv, PIPECONF(pipe));
5397 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5399 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5400 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5401 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5404 if (IS_HASWELL(dev_priv) && crtc_state->dither)
5405 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5407 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5408 val |= PIPECONF_INTERLACED_ILK;
5410 val |= PIPECONF_PROGRESSIVE;
5412 if (IS_HASWELL(dev_priv) &&
5413 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5414 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5416 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5417 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5420 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
5422 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5423 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5426 switch (crtc_state->pipe_bpp) {
5428 val |= PIPEMISC_DITHER_6_BPC;
5431 val |= PIPEMISC_DITHER_8_BPC;
5434 val |= PIPEMISC_DITHER_10_BPC;
5437 val |= PIPEMISC_DITHER_12_BPC;
5440 MISSING_CASE(crtc_state->pipe_bpp);
5444 if (crtc_state->dither)
5445 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5447 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
5448 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
5449 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
5451 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5452 val |= PIPEMISC_YUV420_ENABLE |
5453 PIPEMISC_YUV420_MODE_FULL_BLEND;
5455 if (DISPLAY_VER(dev_priv) >= 11 &&
5456 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
5457 BIT(PLANE_CURSOR))) == 0)
5458 val |= PIPEMISC_HDR_MODE_PRECISION;
5460 if (DISPLAY_VER(dev_priv) >= 12)
5461 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
5463 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
5466 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5468 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5471 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5473 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
5474 case PIPEMISC_DITHER_6_BPC:
5476 case PIPEMISC_DITHER_8_BPC:
5478 case PIPEMISC_DITHER_10_BPC:
5480 case PIPEMISC_DITHER_12_BPC:
5488 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5491 * Account for spread spectrum to avoid
5492 * oversubscribing the link. Max center spread
5493 * is 2.5%; use 5% for safety's sake.
5495 u32 bps = target_clock * bpp * 21 / 20;
5496 return DIV_ROUND_UP(bps, link_bw * 8);
5499 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5500 struct intel_link_m_n *m_n)
5502 struct drm_device *dev = crtc->base.dev;
5503 struct drm_i915_private *dev_priv = to_i915(dev);
5504 enum pipe pipe = crtc->pipe;
5506 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5507 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5508 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5510 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
5511 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5512 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5515 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5516 enum transcoder transcoder,
5517 struct intel_link_m_n *m_n,
5518 struct intel_link_m_n *m2_n2)
5520 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5521 enum pipe pipe = crtc->pipe;
5523 if (DISPLAY_VER(dev_priv) >= 5) {
5524 m_n->link_m = intel_de_read(dev_priv,
5525 PIPE_LINK_M1(transcoder));
5526 m_n->link_n = intel_de_read(dev_priv,
5527 PIPE_LINK_N1(transcoder));
5528 m_n->gmch_m = intel_de_read(dev_priv,
5529 PIPE_DATA_M1(transcoder))
5531 m_n->gmch_n = intel_de_read(dev_priv,
5532 PIPE_DATA_N1(transcoder));
5533 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5534 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5536 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
5537 m2_n2->link_m = intel_de_read(dev_priv,
5538 PIPE_LINK_M2(transcoder));
5539 m2_n2->link_n = intel_de_read(dev_priv,
5540 PIPE_LINK_N2(transcoder));
5541 m2_n2->gmch_m = intel_de_read(dev_priv,
5542 PIPE_DATA_M2(transcoder))
5544 m2_n2->gmch_n = intel_de_read(dev_priv,
5545 PIPE_DATA_N2(transcoder));
5546 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
5547 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5550 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5551 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5552 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5554 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
5555 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5556 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5560 void intel_dp_get_m_n(struct intel_crtc *crtc,
5561 struct intel_crtc_state *pipe_config)
5563 if (pipe_config->has_pch_encoder)
5564 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5566 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5567 &pipe_config->dp_m_n,
5568 &pipe_config->dp_m2_n2);
5571 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
5572 struct intel_crtc_state *pipe_config)
5574 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5575 &pipe_config->fdi_m_n, NULL);
5578 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5581 drm_rect_init(&crtc_state->pch_pfit.dst,
5582 pos >> 16, pos & 0xffff,
5583 size >> 16, size & 0xffff);
5586 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
5588 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5589 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5590 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
5594 /* find scaler attached to this pipe */
5595 for (i = 0; i < crtc->num_scalers; i++) {
5598 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
5599 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
5603 crtc_state->pch_pfit.enabled = true;
5605 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
5606 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
5608 ilk_get_pfit_pos_size(crtc_state, pos, size);
5610 scaler_state->scalers[i].in_use = true;
5614 scaler_state->scaler_id = id;
5616 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
5618 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
5621 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
5623 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5624 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5627 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
5628 if ((ctl & PF_ENABLE) == 0)
5631 crtc_state->pch_pfit.enabled = true;
5633 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
5634 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
5636 ilk_get_pfit_pos_size(crtc_state, pos, size);
5639 * We currently do not free assignements of panel fitters on
5640 * ivb/hsw (since we don't use the higher upscaling modes which
5641 * differentiates them) so just WARN about this case for now.
5643 drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
5644 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
5647 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5648 struct intel_crtc_state *pipe_config)
5650 struct drm_device *dev = crtc->base.dev;
5651 struct drm_i915_private *dev_priv = to_i915(dev);
5652 enum intel_display_power_domain power_domain;
5653 intel_wakeref_t wakeref;
5657 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5658 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5662 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5663 pipe_config->shared_dpll = NULL;
5666 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5667 if (!(tmp & PIPECONF_ENABLE))
5670 switch (tmp & PIPECONF_BPC_MASK) {
5672 pipe_config->pipe_bpp = 18;
5675 pipe_config->pipe_bpp = 24;
5677 case PIPECONF_10BPC:
5678 pipe_config->pipe_bpp = 30;
5680 case PIPECONF_12BPC:
5681 pipe_config->pipe_bpp = 36;
5687 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5688 pipe_config->limited_color_range = true;
5690 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5691 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5692 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5693 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5696 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5700 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5701 PIPECONF_GAMMA_MODE_SHIFT;
5703 pipe_config->csc_mode = intel_de_read(dev_priv,
5704 PIPE_CSC_MODE(crtc->pipe));
5706 i9xx_get_pipe_color_config(pipe_config);
5707 intel_color_get_config(pipe_config);
5709 if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5710 struct intel_shared_dpll *pll;
5711 enum intel_dpll_id pll_id;
5714 pipe_config->has_pch_encoder = true;
5716 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5717 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5718 FDI_DP_PORT_WIDTH_SHIFT) + 1;
5720 ilk_get_fdi_m_n_config(crtc, pipe_config);
5722 if (HAS_PCH_IBX(dev_priv)) {
5724 * The pipe->pch transcoder and pch transcoder->pll
5727 pll_id = (enum intel_dpll_id) crtc->pipe;
5729 tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5730 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5731 pll_id = DPLL_ID_PCH_PLL_B;
5733 pll_id= DPLL_ID_PCH_PLL_A;
5736 pipe_config->shared_dpll =
5737 intel_get_shared_dpll_by_id(dev_priv, pll_id);
5738 pll = pipe_config->shared_dpll;
5740 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5741 &pipe_config->dpll_hw_state);
5742 drm_WARN_ON(dev, !pll_active);
5744 tmp = pipe_config->dpll_hw_state.dpll;
5745 pipe_config->pixel_multiplier =
5746 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5747 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5749 ilk_pch_clock_get(crtc, pipe_config);
5751 pipe_config->pixel_multiplier = 1;
5754 intel_get_transcoder_timings(crtc, pipe_config);
5755 intel_get_pipe_src_size(crtc, pipe_config);
5757 ilk_get_pfit_config(pipe_config);
5762 intel_display_power_put(dev_priv, power_domain, wakeref);
5767 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
5768 struct intel_crtc_state *pipe_config,
5769 struct intel_display_power_domain_set *power_domain_set)
5771 struct drm_device *dev = crtc->base.dev;
5772 struct drm_i915_private *dev_priv = to_i915(dev);
5773 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
5774 unsigned long enabled_panel_transcoders = 0;
5775 enum transcoder panel_transcoder;
5778 if (DISPLAY_VER(dev_priv) >= 11)
5779 panel_transcoder_mask |=
5780 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
5783 * The pipe->transcoder mapping is fixed with the exception of the eDP
5784 * and DSI transcoders handled below.
5786 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5789 * XXX: Do intel_display_power_get_if_enabled before reading this (for
5790 * consistency and less surprising code; it's in always on power).
5792 for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
5793 panel_transcoder_mask) {
5794 bool force_thru = false;
5795 enum pipe trans_pipe;
5797 tmp = intel_de_read(dev_priv,
5798 TRANS_DDI_FUNC_CTL(panel_transcoder));
5799 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
5803 * Log all enabled ones, only use the first one.
5805 * FIXME: This won't work for two separate DSI displays.
5807 enabled_panel_transcoders |= BIT(panel_transcoder);
5808 if (enabled_panel_transcoders != BIT(panel_transcoder))
5811 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
5814 "unknown pipe linked to transcoder %s\n",
5815 transcoder_name(panel_transcoder));
5817 case TRANS_DDI_EDP_INPUT_A_ONOFF:
5820 case TRANS_DDI_EDP_INPUT_A_ON:
5821 trans_pipe = PIPE_A;
5823 case TRANS_DDI_EDP_INPUT_B_ONOFF:
5824 trans_pipe = PIPE_B;
5826 case TRANS_DDI_EDP_INPUT_C_ONOFF:
5827 trans_pipe = PIPE_C;
5829 case TRANS_DDI_EDP_INPUT_D_ONOFF:
5830 trans_pipe = PIPE_D;
5834 if (trans_pipe == crtc->pipe) {
5835 pipe_config->cpu_transcoder = panel_transcoder;
5836 pipe_config->pch_pfit.force_thru = force_thru;
5841 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
5843 drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
5844 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
5846 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
5847 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
5850 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
5852 return tmp & PIPECONF_ENABLE;
5855 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
5856 struct intel_crtc_state *pipe_config,
5857 struct intel_display_power_domain_set *power_domain_set)
5859 struct drm_device *dev = crtc->base.dev;
5860 struct drm_i915_private *dev_priv = to_i915(dev);
5861 enum transcoder cpu_transcoder;
5865 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
5867 cpu_transcoder = TRANSCODER_DSI_A;
5869 cpu_transcoder = TRANSCODER_DSI_C;
5871 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
5872 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
5876 * The PLL needs to be enabled with a valid divider
5877 * configuration, otherwise accessing DSI registers will hang
5878 * the machine. See BSpec North Display Engine
5879 * registers/MIPI[BXT]. We can break out here early, since we
5880 * need the same DSI PLL to be enabled for both DSI ports.
5882 if (!bxt_dsi_pll_is_enabled(dev_priv))
5885 /* XXX: this works for video mode only */
5886 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
5887 if (!(tmp & DPI_ENABLE))
5890 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
5891 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
5894 pipe_config->cpu_transcoder = cpu_transcoder;
5898 return transcoder_is_dsi(pipe_config->cpu_transcoder);
5901 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
5902 struct intel_crtc_state *pipe_config)
5904 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5905 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5909 if (transcoder_is_dsi(cpu_transcoder)) {
5910 port = (cpu_transcoder == TRANSCODER_DSI_A) ?
5913 tmp = intel_de_read(dev_priv,
5914 TRANS_DDI_FUNC_CTL(cpu_transcoder));
5915 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
5917 if (DISPLAY_VER(dev_priv) >= 12)
5918 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
5920 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
5924 * Haswell has only FDI/PCH transcoder A. It is which is connected to
5925 * DDI E. So just check whether this pipe is wired to DDI E and whether
5926 * the PCH transcoder is on.
5928 if (DISPLAY_VER(dev_priv) < 9 &&
5929 (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
5930 pipe_config->has_pch_encoder = true;
5932 tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
5933 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5934 FDI_DP_PORT_WIDTH_SHIFT) + 1;
5936 ilk_get_fdi_m_n_config(crtc, pipe_config);
5940 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
5941 struct intel_crtc_state *pipe_config)
5943 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5944 struct intel_display_power_domain_set power_domain_set = { };
5948 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
5949 POWER_DOMAIN_PIPE(crtc->pipe)))
5952 pipe_config->shared_dpll = NULL;
5954 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
5956 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
5957 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
5958 drm_WARN_ON(&dev_priv->drm, active);
5962 intel_dsc_get_config(pipe_config);
5965 /* bigjoiner slave doesn't enable transcoder */
5966 if (!pipe_config->bigjoiner_slave)
5970 pipe_config->pixel_multiplier = 1;
5972 /* we cannot read out most state, so don't bother.. */
5973 pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
5974 } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
5975 DISPLAY_VER(dev_priv) >= 11) {
5976 hsw_get_ddi_port_state(crtc, pipe_config);
5977 intel_get_transcoder_timings(crtc, pipe_config);
5980 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
5981 intel_vrr_get_config(crtc, pipe_config);
5983 intel_get_pipe_src_size(crtc, pipe_config);
5985 if (IS_HASWELL(dev_priv)) {
5986 u32 tmp = intel_de_read(dev_priv,
5987 PIPECONF(pipe_config->cpu_transcoder));
5989 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
5990 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5992 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5994 pipe_config->output_format =
5995 bdw_get_pipemisc_output_format(crtc);
5998 pipe_config->gamma_mode = intel_de_read(dev_priv,
5999 GAMMA_MODE(crtc->pipe));
6001 pipe_config->csc_mode = intel_de_read(dev_priv,
6002 PIPE_CSC_MODE(crtc->pipe));
6004 if (DISPLAY_VER(dev_priv) >= 9) {
6005 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6007 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6008 pipe_config->gamma_enable = true;
6010 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6011 pipe_config->csc_enable = true;
6013 i9xx_get_pipe_color_config(pipe_config);
6016 intel_color_get_config(pipe_config);
6018 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6019 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6020 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6021 pipe_config->ips_linetime =
6022 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
6024 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6025 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6026 if (DISPLAY_VER(dev_priv) >= 9)
6027 skl_get_pfit_config(pipe_config);
6029 ilk_get_pfit_config(pipe_config);
6032 if (hsw_crtc_supports_ips(crtc)) {
6033 if (IS_HASWELL(dev_priv))
6034 pipe_config->ips_enabled = intel_de_read(dev_priv,
6035 IPS_CTL) & IPS_ENABLE;
6038 * We cannot readout IPS state on broadwell, set to
6039 * true so we can set it to a defined state on first
6042 pipe_config->ips_enabled = true;
6046 if (pipe_config->bigjoiner_slave) {
6047 /* Cannot be read out as a slave, set to 0. */
6048 pipe_config->pixel_multiplier = 0;
6049 } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
6050 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6051 pipe_config->pixel_multiplier =
6052 intel_de_read(dev_priv,
6053 PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6055 pipe_config->pixel_multiplier = 1;
6059 intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
6064 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6066 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6067 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6069 if (!i915->display.get_pipe_config(crtc, crtc_state))
6072 crtc_state->hw.active = true;
6074 intel_crtc_readout_derived_state(crtc_state);
6079 /* VESA 640x480x72Hz mode to set on the pipe */
6080 static const struct drm_display_mode load_detect_mode = {
6081 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6082 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6085 struct drm_framebuffer *
6086 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6087 struct drm_mode_fb_cmd2 *mode_cmd)
6089 struct intel_framebuffer *intel_fb;
6092 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6094 return ERR_PTR(-ENOMEM);
6096 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6100 return &intel_fb->base;
6104 return ERR_PTR(ret);
6107 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6108 struct drm_crtc *crtc)
6110 struct drm_plane *plane;
6111 struct drm_plane_state *plane_state;
6114 ret = drm_atomic_add_affected_planes(state, crtc);
6118 for_each_new_plane_in_state(state, plane, plane_state, i) {
6119 if (plane_state->crtc != crtc)
6122 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6126 drm_atomic_set_fb_for_plane(plane_state, NULL);
6132 int intel_get_load_detect_pipe(struct drm_connector *connector,
6133 struct intel_load_detect_pipe *old,
6134 struct drm_modeset_acquire_ctx *ctx)
6136 struct intel_crtc *intel_crtc;
6137 struct intel_encoder *intel_encoder =
6138 intel_attached_encoder(to_intel_connector(connector));
6139 struct drm_crtc *possible_crtc;
6140 struct drm_encoder *encoder = &intel_encoder->base;
6141 struct drm_crtc *crtc = NULL;
6142 struct drm_device *dev = encoder->dev;
6143 struct drm_i915_private *dev_priv = to_i915(dev);
6144 struct drm_mode_config *config = &dev->mode_config;
6145 struct drm_atomic_state *state = NULL, *restore_state = NULL;
6146 struct drm_connector_state *connector_state;
6147 struct intel_crtc_state *crtc_state;
6150 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6151 connector->base.id, connector->name,
6152 encoder->base.id, encoder->name);
6154 old->restore_state = NULL;
6156 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6159 * Algorithm gets a little messy:
6161 * - if the connector already has an assigned crtc, use it (but make
6162 * sure it's on first)
6164 * - try to find the first unused crtc that can drive this connector,
6165 * and use that if we find one
6168 /* See if we already have a CRTC for this connector */
6169 if (connector->state->crtc) {
6170 crtc = connector->state->crtc;
6172 ret = drm_modeset_lock(&crtc->mutex, ctx);
6176 /* Make sure the crtc and connector are running */
6180 /* Find an unused one (if possible) */
6181 for_each_crtc(dev, possible_crtc) {
6183 if (!(encoder->possible_crtcs & (1 << i)))
6186 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
6190 if (possible_crtc->state->enable) {
6191 drm_modeset_unlock(&possible_crtc->mutex);
6195 crtc = possible_crtc;
6200 * If we didn't find an unused CRTC, don't use any.
6203 drm_dbg_kms(&dev_priv->drm,
6204 "no pipe available for load-detect\n");
6210 intel_crtc = to_intel_crtc(crtc);
6212 state = drm_atomic_state_alloc(dev);
6213 restore_state = drm_atomic_state_alloc(dev);
6214 if (!state || !restore_state) {
6219 state->acquire_ctx = ctx;
6220 restore_state->acquire_ctx = ctx;
6222 connector_state = drm_atomic_get_connector_state(state, connector);
6223 if (IS_ERR(connector_state)) {
6224 ret = PTR_ERR(connector_state);
6228 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
6232 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6233 if (IS_ERR(crtc_state)) {
6234 ret = PTR_ERR(crtc_state);
6238 crtc_state->uapi.active = true;
6240 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
6245 ret = intel_modeset_disable_planes(state, crtc);
6249 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
6251 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
6253 ret = drm_atomic_add_affected_planes(restore_state, crtc);
6255 drm_dbg_kms(&dev_priv->drm,
6256 "Failed to create a copy of old state to restore: %i\n",
6261 ret = drm_atomic_commit(state);
6263 drm_dbg_kms(&dev_priv->drm,
6264 "failed to set mode on load-detect pipe\n");
6268 old->restore_state = restore_state;
6269 drm_atomic_state_put(state);
6271 /* let the connector get through one full cycle before testing */
6272 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
6277 drm_atomic_state_put(state);
6280 if (restore_state) {
6281 drm_atomic_state_put(restore_state);
6282 restore_state = NULL;
6285 if (ret == -EDEADLK)
6291 void intel_release_load_detect_pipe(struct drm_connector *connector,
6292 struct intel_load_detect_pipe *old,
6293 struct drm_modeset_acquire_ctx *ctx)
6295 struct intel_encoder *intel_encoder =
6296 intel_attached_encoder(to_intel_connector(connector));
6297 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6298 struct drm_encoder *encoder = &intel_encoder->base;
6299 struct drm_atomic_state *state = old->restore_state;
6302 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6303 connector->base.id, connector->name,
6304 encoder->base.id, encoder->name);
6309 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6311 drm_dbg_kms(&i915->drm,
6312 "Couldn't release load detect pipe: %i\n", ret);
6313 drm_atomic_state_put(state);
6316 static int i9xx_pll_refclk(struct drm_device *dev,
6317 const struct intel_crtc_state *pipe_config)
6319 struct drm_i915_private *dev_priv = to_i915(dev);
6320 u32 dpll = pipe_config->dpll_hw_state.dpll;
6322 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6323 return dev_priv->vbt.lvds_ssc_freq;
6324 else if (HAS_PCH_SPLIT(dev_priv))
6326 else if (DISPLAY_VER(dev_priv) != 2)
6332 /* Returns the clock of the currently programmed mode of the given pipe. */
6333 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6334 struct intel_crtc_state *pipe_config)
6336 struct drm_device *dev = crtc->base.dev;
6337 struct drm_i915_private *dev_priv = to_i915(dev);
6338 enum pipe pipe = crtc->pipe;
6339 u32 dpll = pipe_config->dpll_hw_state.dpll;
6343 int refclk = i9xx_pll_refclk(dev, pipe_config);
6345 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6346 fp = pipe_config->dpll_hw_state.fp0;
6348 fp = pipe_config->dpll_hw_state.fp1;
6350 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6351 if (IS_PINEVIEW(dev_priv)) {
6352 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6353 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6355 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6356 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6359 if (DISPLAY_VER(dev_priv) != 2) {
6360 if (IS_PINEVIEW(dev_priv))
6361 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6362 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6364 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6365 DPLL_FPA01_P1_POST_DIV_SHIFT);
6367 switch (dpll & DPLL_MODE_MASK) {
6368 case DPLLB_MODE_DAC_SERIAL:
6369 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6372 case DPLLB_MODE_LVDS:
6373 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6377 drm_dbg_kms(&dev_priv->drm,
6378 "Unknown DPLL mode %08x in programmed "
6379 "mode\n", (int)(dpll & DPLL_MODE_MASK));
6383 if (IS_PINEVIEW(dev_priv))
6384 port_clock = pnv_calc_dpll_params(refclk, &clock);
6386 port_clock = i9xx_calc_dpll_params(refclk, &clock);
6388 u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
6390 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
6393 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6394 DPLL_FPA01_P1_POST_DIV_SHIFT);
6396 if (lvds & LVDS_CLKB_POWER_UP)
6401 if (dpll & PLL_P1_DIVIDE_BY_TWO)
6404 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6405 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6407 if (dpll & PLL_P2_DIVIDE_BY_4)
6413 port_clock = i9xx_calc_dpll_params(refclk, &clock);
6417 * This value includes pixel_multiplier. We will use
6418 * port_clock to compute adjusted_mode.crtc_clock in the
6419 * encoder's get_config() function.
6421 pipe_config->port_clock = port_clock;
6424 int intel_dotclock_calculate(int link_freq,
6425 const struct intel_link_m_n *m_n)
6428 * The calculation for the data clock is:
6429 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6430 * But we want to avoid losing precison if possible, so:
6431 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6433 * and the link clock is simpler:
6434 * link_clock = (m * link_clock) / n
6440 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6443 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6444 struct intel_crtc_state *pipe_config)
6446 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6448 /* read out port_clock from the DPLL */
6449 i9xx_crtc_clock_get(crtc, pipe_config);
6452 * In case there is an active pipe without active ports,
6453 * we may need some idea for the dotclock anyway.
6454 * Calculate one based on the FDI configuration.
6456 pipe_config->hw.adjusted_mode.crtc_clock =
6457 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6458 &pipe_config->fdi_m_n);
6461 /* Returns the currently programmed mode of the given encoder. */
6462 struct drm_display_mode *
6463 intel_encoder_current_mode(struct intel_encoder *encoder)
6465 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6466 struct intel_crtc_state *crtc_state;
6467 struct drm_display_mode *mode;
6468 struct intel_crtc *crtc;
6471 if (!encoder->get_hw_state(encoder, &pipe))
6474 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6476 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6480 crtc_state = intel_crtc_state_alloc(crtc);
6486 if (!intel_crtc_get_pipe_config(crtc_state)) {
6492 intel_encoder_get_config(encoder, crtc_state);
6494 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6502 * intel_wm_need_update - Check whether watermarks need updating
6503 * @cur: current plane state
6504 * @new: new plane state
6506 * Check current plane state versus the new one to determine whether
6507 * watermarks need to be recalculated.
6509 * Returns true or false.
6511 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6512 struct intel_plane_state *new)
6514 /* Update watermarks on tiling or size changes. */
6515 if (new->uapi.visible != cur->uapi.visible)
6518 if (!cur->hw.fb || !new->hw.fb)
6521 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6522 cur->hw.rotation != new->hw.rotation ||
6523 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6524 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6525 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6526 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6532 static bool needs_scaling(const struct intel_plane_state *state)
6534 int src_w = drm_rect_width(&state->uapi.src) >> 16;
6535 int src_h = drm_rect_height(&state->uapi.src) >> 16;
6536 int dst_w = drm_rect_width(&state->uapi.dst);
6537 int dst_h = drm_rect_height(&state->uapi.dst);
6539 return (src_w != dst_w || src_h != dst_h);
6542 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
6543 struct intel_crtc_state *crtc_state,
6544 const struct intel_plane_state *old_plane_state,
6545 struct intel_plane_state *plane_state)
6547 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6548 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
6549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6550 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6551 bool was_crtc_enabled = old_crtc_state->hw.active;
6552 bool is_crtc_enabled = crtc_state->hw.active;
6553 bool turn_off, turn_on, visible, was_visible;
6556 if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
6557 ret = skl_update_scaler_plane(crtc_state, plane_state);
6562 was_visible = old_plane_state->uapi.visible;
6563 visible = plane_state->uapi.visible;
6565 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
6566 was_visible = false;
6569 * Visibility is calculated as if the crtc was on, but
6570 * after scaler setup everything depends on it being off
6571 * when the crtc isn't active.
6573 * FIXME this is wrong for watermarks. Watermarks should also
6574 * be computed as if the pipe would be active. Perhaps move
6575 * per-plane wm computation to the .check_plane() hook, and
6576 * only combine the results from all planes in the current place?
6578 if (!is_crtc_enabled) {
6579 intel_plane_set_invisible(crtc_state, plane_state);
6583 if (!was_visible && !visible)
6586 turn_off = was_visible && (!visible || mode_changed);
6587 turn_on = visible && (!was_visible || mode_changed);
6589 drm_dbg_atomic(&dev_priv->drm,
6590 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
6591 crtc->base.base.id, crtc->base.name,
6592 plane->base.base.id, plane->base.name,
6593 was_visible, visible,
6594 turn_off, turn_on, mode_changed);
6597 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6598 crtc_state->update_wm_pre = true;
6600 /* must disable cxsr around plane enable/disable */
6601 if (plane->id != PLANE_CURSOR)
6602 crtc_state->disable_cxsr = true;
6603 } else if (turn_off) {
6604 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6605 crtc_state->update_wm_post = true;
6607 /* must disable cxsr around plane enable/disable */
6608 if (plane->id != PLANE_CURSOR)
6609 crtc_state->disable_cxsr = true;
6610 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
6611 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
6612 /* FIXME bollocks */
6613 crtc_state->update_wm_pre = true;
6614 crtc_state->update_wm_post = true;
6618 if (visible || was_visible)
6619 crtc_state->fb_bits |= plane->frontbuffer_bit;
6622 * ILK/SNB DVSACNTR/Sprite Enable
6623 * IVB SPR_CTL/Sprite Enable
6624 * "When in Self Refresh Big FIFO mode, a write to enable the
6625 * plane will be internally buffered and delayed while Big FIFO
6628 * Which means that enabling the sprite can take an extra frame
6629 * when we start in big FIFO mode (LP1+). Thus we need to drop
6630 * down to LP0 and wait for vblank in order to make sure the
6631 * sprite gets enabled on the next vblank after the register write.
6632 * Doing otherwise would risk enabling the sprite one frame after
6633 * we've already signalled flip completion. We can resume LP1+
6634 * once the sprite has been enabled.
6637 * WaCxSRDisabledForSpriteScaling:ivb
6638 * IVB SPR_SCALE/Scaling Enable
6639 * "Low Power watermarks must be disabled for at least one
6640 * frame before enabling sprite scaling, and kept disabled
6641 * until sprite scaling is disabled."
6643 * ILK/SNB DVSASCALE/Scaling Enable
6644 * "When in Self Refresh Big FIFO mode, scaling enable will be
6645 * masked off while Big FIFO mode is exiting."
6647 * Despite the w/a only being listed for IVB we assume that
6648 * the ILK/SNB note has similar ramifications, hence we apply
6649 * the w/a on all three platforms.
6651 * With experimental results seems this is needed also for primary
6652 * plane, not only sprite plane.
6654 if (plane->id != PLANE_CURSOR &&
6655 (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
6656 IS_IVYBRIDGE(dev_priv)) &&
6657 (turn_on || (!needs_scaling(old_plane_state) &&
6658 needs_scaling(plane_state))))
6659 crtc_state->disable_lp_wm = true;
6664 static bool encoders_cloneable(const struct intel_encoder *a,
6665 const struct intel_encoder *b)
6667 /* masks could be asymmetric, so check both ways */
6668 return a == b || (a->cloneable & (1 << b->type) &&
6669 b->cloneable & (1 << a->type));
6672 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6673 struct intel_crtc *crtc,
6674 struct intel_encoder *encoder)
6676 struct intel_encoder *source_encoder;
6677 struct drm_connector *connector;
6678 struct drm_connector_state *connector_state;
6681 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6682 if (connector_state->crtc != &crtc->base)
6686 to_intel_encoder(connector_state->best_encoder);
6687 if (!encoders_cloneable(encoder, source_encoder))
6694 static int icl_add_linked_planes(struct intel_atomic_state *state)
6696 struct intel_plane *plane, *linked;
6697 struct intel_plane_state *plane_state, *linked_plane_state;
6700 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6701 linked = plane_state->planar_linked_plane;
6706 linked_plane_state = intel_atomic_get_plane_state(state, linked);
6707 if (IS_ERR(linked_plane_state))
6708 return PTR_ERR(linked_plane_state);
6710 drm_WARN_ON(state->base.dev,
6711 linked_plane_state->planar_linked_plane != plane);
6712 drm_WARN_ON(state->base.dev,
6713 linked_plane_state->planar_slave == plane_state->planar_slave);
6719 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
6721 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6722 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6723 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
6724 struct intel_plane *plane, *linked;
6725 struct intel_plane_state *plane_state;
6728 if (DISPLAY_VER(dev_priv) < 11)
6732 * Destroy all old plane links and make the slave plane invisible
6733 * in the crtc_state->active_planes mask.
6735 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6736 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
6739 plane_state->planar_linked_plane = NULL;
6740 if (plane_state->planar_slave && !plane_state->uapi.visible) {
6741 crtc_state->enabled_planes &= ~BIT(plane->id);
6742 crtc_state->active_planes &= ~BIT(plane->id);
6743 crtc_state->update_planes |= BIT(plane->id);
6746 plane_state->planar_slave = false;
6749 if (!crtc_state->nv12_planes)
6752 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6753 struct intel_plane_state *linked_state = NULL;
6755 if (plane->pipe != crtc->pipe ||
6756 !(crtc_state->nv12_planes & BIT(plane->id)))
6759 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
6760 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
6763 if (crtc_state->active_planes & BIT(linked->id))
6766 linked_state = intel_atomic_get_plane_state(state, linked);
6767 if (IS_ERR(linked_state))
6768 return PTR_ERR(linked_state);
6773 if (!linked_state) {
6774 drm_dbg_kms(&dev_priv->drm,
6775 "Need %d free Y planes for planar YUV\n",
6776 hweight8(crtc_state->nv12_planes));
6781 plane_state->planar_linked_plane = linked;
6783 linked_state->planar_slave = true;
6784 linked_state->planar_linked_plane = plane;
6785 crtc_state->enabled_planes |= BIT(linked->id);
6786 crtc_state->active_planes |= BIT(linked->id);
6787 crtc_state->update_planes |= BIT(linked->id);
6788 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
6789 linked->base.name, plane->base.name);
6791 /* Copy parameters to slave plane */
6792 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
6793 linked_state->color_ctl = plane_state->color_ctl;
6794 linked_state->view = plane_state->view;
6796 intel_plane_copy_hw_state(linked_state, plane_state);
6797 linked_state->uapi.src = plane_state->uapi.src;
6798 linked_state->uapi.dst = plane_state->uapi.dst;
6800 if (icl_is_hdr_plane(dev_priv, plane->id)) {
6801 if (linked->id == PLANE_SPRITE5)
6802 plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
6803 else if (linked->id == PLANE_SPRITE4)
6804 plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
6805 else if (linked->id == PLANE_SPRITE3)
6806 plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
6807 else if (linked->id == PLANE_SPRITE2)
6808 plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
6810 MISSING_CASE(linked->id);
6817 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
6819 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6820 struct intel_atomic_state *state =
6821 to_intel_atomic_state(new_crtc_state->uapi.state);
6822 const struct intel_crtc_state *old_crtc_state =
6823 intel_atomic_get_old_crtc_state(state, crtc);
6825 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
6828 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
6830 const struct drm_display_mode *pipe_mode =
6831 &crtc_state->hw.pipe_mode;
6834 if (!crtc_state->hw.enable)
6837 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6838 pipe_mode->crtc_clock);
6840 return min(linetime_wm, 0x1ff);
6843 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
6844 const struct intel_cdclk_state *cdclk_state)
6846 const struct drm_display_mode *pipe_mode =
6847 &crtc_state->hw.pipe_mode;
6850 if (!crtc_state->hw.enable)
6853 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6854 cdclk_state->logical.cdclk);
6856 return min(linetime_wm, 0x1ff);
6859 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
6861 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6862 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6863 const struct drm_display_mode *pipe_mode =
6864 &crtc_state->hw.pipe_mode;
6867 if (!crtc_state->hw.enable)
6870 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
6871 crtc_state->pixel_rate);
6873 /* Display WA #1135: BXT:ALL GLK:ALL */
6874 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6875 dev_priv->ipc_enabled)
6878 return min(linetime_wm, 0x1ff);
6881 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
6882 struct intel_crtc *crtc)
6884 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6885 struct intel_crtc_state *crtc_state =
6886 intel_atomic_get_new_crtc_state(state, crtc);
6887 const struct intel_cdclk_state *cdclk_state;
6889 if (DISPLAY_VER(dev_priv) >= 9)
6890 crtc_state->linetime = skl_linetime_wm(crtc_state);
6892 crtc_state->linetime = hsw_linetime_wm(crtc_state);
6894 if (!hsw_crtc_supports_ips(crtc))
6897 cdclk_state = intel_atomic_get_cdclk_state(state);
6898 if (IS_ERR(cdclk_state))
6899 return PTR_ERR(cdclk_state);
6901 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
6907 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
6908 struct intel_crtc *crtc)
6910 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6911 struct intel_crtc_state *crtc_state =
6912 intel_atomic_get_new_crtc_state(state, crtc);
6913 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6916 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
6917 mode_changed && !crtc_state->hw.active)
6918 crtc_state->update_wm_post = true;
6920 if (mode_changed && crtc_state->hw.enable &&
6921 dev_priv->display.crtc_compute_clock &&
6922 !crtc_state->bigjoiner_slave &&
6923 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
6924 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
6930 * May need to update pipe gamma enable bits
6931 * when C8 planes are getting enabled/disabled.
6933 if (c8_planes_changed(crtc_state))
6934 crtc_state->uapi.color_mgmt_changed = true;
6936 if (mode_changed || crtc_state->update_pipe ||
6937 crtc_state->uapi.color_mgmt_changed) {
6938 ret = intel_color_check(crtc_state);
6943 if (dev_priv->display.compute_pipe_wm) {
6944 ret = dev_priv->display.compute_pipe_wm(crtc_state);
6946 drm_dbg_kms(&dev_priv->drm,
6947 "Target pipe watermarks are invalid\n");
6952 if (dev_priv->display.compute_intermediate_wm) {
6953 if (drm_WARN_ON(&dev_priv->drm,
6954 !dev_priv->display.compute_pipe_wm))
6958 * Calculate 'intermediate' watermarks that satisfy both the
6959 * old state and the new state. We can program these
6962 ret = dev_priv->display.compute_intermediate_wm(crtc_state);
6964 drm_dbg_kms(&dev_priv->drm,
6965 "No valid intermediate pipe watermarks are possible\n");
6970 if (DISPLAY_VER(dev_priv) >= 9) {
6971 if (mode_changed || crtc_state->update_pipe) {
6972 ret = skl_update_scaler_crtc(crtc_state);
6977 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
6982 if (HAS_IPS(dev_priv)) {
6983 ret = hsw_compute_ips_config(crtc_state);
6988 if (DISPLAY_VER(dev_priv) >= 9 ||
6989 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
6990 ret = hsw_compute_linetime_wm(state, crtc);
6996 if (!mode_changed) {
6997 ret = intel_psr2_sel_fetch_update(state, crtc);
7005 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7007 struct intel_connector *connector;
7008 struct drm_connector_list_iter conn_iter;
7010 drm_connector_list_iter_begin(dev, &conn_iter);
7011 for_each_intel_connector_iter(connector, &conn_iter) {
7012 struct drm_connector_state *conn_state = connector->base.state;
7013 struct intel_encoder *encoder =
7014 to_intel_encoder(connector->base.encoder);
7016 if (conn_state->crtc)
7017 drm_connector_put(&connector->base);
7020 struct intel_crtc *crtc =
7021 to_intel_crtc(encoder->base.crtc);
7022 const struct intel_crtc_state *crtc_state =
7023 to_intel_crtc_state(crtc->base.state);
7025 conn_state->best_encoder = &encoder->base;
7026 conn_state->crtc = &crtc->base;
7027 conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7029 drm_connector_get(&connector->base);
7031 conn_state->best_encoder = NULL;
7032 conn_state->crtc = NULL;
7035 drm_connector_list_iter_end(&conn_iter);
7039 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7040 struct intel_crtc_state *pipe_config)
7042 struct drm_connector *connector = conn_state->connector;
7043 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7044 const struct drm_display_info *info = &connector->display_info;
7047 switch (conn_state->max_bpc) {
7061 MISSING_CASE(conn_state->max_bpc);
7065 if (bpp < pipe_config->pipe_bpp) {
7066 drm_dbg_kms(&i915->drm,
7067 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7068 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7069 connector->base.id, connector->name,
7071 3 * conn_state->max_requested_bpc,
7072 pipe_config->pipe_bpp);
7074 pipe_config->pipe_bpp = bpp;
7081 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7082 struct intel_crtc_state *pipe_config)
7084 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7085 struct drm_atomic_state *state = pipe_config->uapi.state;
7086 struct drm_connector *connector;
7087 struct drm_connector_state *connector_state;
7090 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7091 IS_CHERRYVIEW(dev_priv)))
7093 else if (DISPLAY_VER(dev_priv) >= 5)
7098 pipe_config->pipe_bpp = bpp;
7100 /* Clamp display bpp to connector max bpp */
7101 for_each_new_connector_in_state(state, connector, connector_state, i) {
7104 if (connector_state->crtc != &crtc->base)
7107 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7115 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7116 const struct drm_display_mode *mode)
7118 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7119 "type: 0x%x flags: 0x%x\n",
7121 mode->crtc_hdisplay, mode->crtc_hsync_start,
7122 mode->crtc_hsync_end, mode->crtc_htotal,
7123 mode->crtc_vdisplay, mode->crtc_vsync_start,
7124 mode->crtc_vsync_end, mode->crtc_vtotal,
7125 mode->type, mode->flags);
7129 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7130 const char *id, unsigned int lane_count,
7131 const struct intel_link_m_n *m_n)
7133 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7135 drm_dbg_kms(&i915->drm,
7136 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7138 m_n->gmch_m, m_n->gmch_n,
7139 m_n->link_m, m_n->link_n, m_n->tu);
7143 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7144 const union hdmi_infoframe *frame)
7146 if (!drm_debug_enabled(DRM_UT_KMS))
7149 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7153 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7154 const struct drm_dp_vsc_sdp *vsc)
7156 if (!drm_debug_enabled(DRM_UT_KMS))
7159 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7162 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7164 static const char * const output_type_str[] = {
7165 OUTPUT_TYPE(UNUSED),
7166 OUTPUT_TYPE(ANALOG),
7176 OUTPUT_TYPE(DP_MST),
7181 static void snprintf_output_types(char *buf, size_t len,
7182 unsigned int output_types)
7189 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7192 if ((output_types & BIT(i)) == 0)
7195 r = snprintf(str, len, "%s%s",
7196 str != buf ? "," : "", output_type_str[i]);
7202 output_types &= ~BIT(i);
7205 WARN_ON_ONCE(output_types != 0);
7208 static const char * const output_format_str[] = {
7209 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
7210 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
7211 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
7214 static const char *output_formats(enum intel_output_format format)
7216 if (format >= ARRAY_SIZE(output_format_str))
7218 return output_format_str[format];
7221 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
7223 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7224 struct drm_i915_private *i915 = to_i915(plane->base.dev);
7225 const struct drm_framebuffer *fb = plane_state->hw.fb;
7228 drm_dbg_kms(&i915->drm,
7229 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
7230 plane->base.base.id, plane->base.name,
7231 yesno(plane_state->uapi.visible));
7235 drm_dbg_kms(&i915->drm,
7236 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
7237 plane->base.base.id, plane->base.name,
7238 fb->base.id, fb->width, fb->height, &fb->format->format,
7239 fb->modifier, yesno(plane_state->uapi.visible));
7240 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
7241 plane_state->hw.rotation, plane_state->scaler_id);
7242 if (plane_state->uapi.visible)
7243 drm_dbg_kms(&i915->drm,
7244 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
7245 DRM_RECT_FP_ARG(&plane_state->uapi.src),
7246 DRM_RECT_ARG(&plane_state->uapi.dst));
7249 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
7250 struct intel_atomic_state *state,
7251 const char *context)
7253 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7254 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7255 const struct intel_plane_state *plane_state;
7256 struct intel_plane *plane;
7260 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
7261 crtc->base.base.id, crtc->base.name,
7262 yesno(pipe_config->hw.enable), context);
7264 if (!pipe_config->hw.enable)
7267 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
7268 drm_dbg_kms(&dev_priv->drm,
7269 "active: %s, output_types: %s (0x%x), output format: %s\n",
7270 yesno(pipe_config->hw.active),
7271 buf, pipe_config->output_types,
7272 output_formats(pipe_config->output_format));
7274 drm_dbg_kms(&dev_priv->drm,
7275 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
7276 transcoder_name(pipe_config->cpu_transcoder),
7277 pipe_config->pipe_bpp, pipe_config->dither);
7279 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
7280 transcoder_name(pipe_config->mst_master_transcoder));
7282 drm_dbg_kms(&dev_priv->drm,
7283 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
7284 transcoder_name(pipe_config->master_transcoder),
7285 pipe_config->sync_mode_slaves_mask);
7287 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
7288 pipe_config->bigjoiner_slave ? "slave" :
7289 pipe_config->bigjoiner ? "master" : "no");
7291 drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
7292 enableddisabled(pipe_config->splitter.enable),
7293 pipe_config->splitter.link_count,
7294 pipe_config->splitter.pixel_overlap);
7296 if (pipe_config->has_pch_encoder)
7297 intel_dump_m_n_config(pipe_config, "fdi",
7298 pipe_config->fdi_lanes,
7299 &pipe_config->fdi_m_n);
7301 if (intel_crtc_has_dp_encoder(pipe_config)) {
7302 intel_dump_m_n_config(pipe_config, "dp m_n",
7303 pipe_config->lane_count, &pipe_config->dp_m_n);
7304 if (pipe_config->has_drrs)
7305 intel_dump_m_n_config(pipe_config, "dp m2_n2",
7306 pipe_config->lane_count,
7307 &pipe_config->dp_m2_n2);
7310 drm_dbg_kms(&dev_priv->drm,
7311 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
7312 pipe_config->has_audio, pipe_config->has_infoframe,
7313 pipe_config->infoframes.enable);
7315 if (pipe_config->infoframes.enable &
7316 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
7317 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
7318 pipe_config->infoframes.gcp);
7319 if (pipe_config->infoframes.enable &
7320 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
7321 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
7322 if (pipe_config->infoframes.enable &
7323 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
7324 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
7325 if (pipe_config->infoframes.enable &
7326 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
7327 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
7328 if (pipe_config->infoframes.enable &
7329 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
7330 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7331 if (pipe_config->infoframes.enable &
7332 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
7333 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7334 if (pipe_config->infoframes.enable &
7335 intel_hdmi_infoframe_enable(DP_SDP_VSC))
7336 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
7338 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
7339 yesno(pipe_config->vrr.enable),
7340 pipe_config->vrr.vmin, pipe_config->vrr.vmax,
7341 pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
7342 intel_vrr_vmin_vblank_start(pipe_config),
7343 intel_vrr_vmax_vblank_start(pipe_config));
7345 drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
7346 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
7347 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
7348 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
7349 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
7350 drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
7351 drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
7352 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
7353 drm_dbg_kms(&dev_priv->drm,
7354 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
7355 pipe_config->port_clock,
7356 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
7357 pipe_config->pixel_rate);
7359 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
7360 pipe_config->linetime, pipe_config->ips_linetime);
7362 if (DISPLAY_VER(dev_priv) >= 9)
7363 drm_dbg_kms(&dev_priv->drm,
7364 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
7366 pipe_config->scaler_state.scaler_users,
7367 pipe_config->scaler_state.scaler_id);
7369 if (HAS_GMCH(dev_priv))
7370 drm_dbg_kms(&dev_priv->drm,
7371 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7372 pipe_config->gmch_pfit.control,
7373 pipe_config->gmch_pfit.pgm_ratios,
7374 pipe_config->gmch_pfit.lvds_border_bits);
7376 drm_dbg_kms(&dev_priv->drm,
7377 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
7378 DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
7379 enableddisabled(pipe_config->pch_pfit.enabled),
7380 yesno(pipe_config->pch_pfit.force_thru));
7382 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
7383 pipe_config->ips_enabled, pipe_config->double_wide);
7385 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
7387 if (IS_CHERRYVIEW(dev_priv))
7388 drm_dbg_kms(&dev_priv->drm,
7389 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7390 pipe_config->cgm_mode, pipe_config->gamma_mode,
7391 pipe_config->gamma_enable, pipe_config->csc_enable);
7393 drm_dbg_kms(&dev_priv->drm,
7394 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7395 pipe_config->csc_mode, pipe_config->gamma_mode,
7396 pipe_config->gamma_enable, pipe_config->csc_enable);
7398 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
7399 pipe_config->hw.degamma_lut ?
7400 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
7401 pipe_config->hw.gamma_lut ?
7402 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
7408 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7409 if (plane->pipe == crtc->pipe)
7410 intel_dump_plane_state(plane_state);
7414 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7416 struct drm_device *dev = state->base.dev;
7417 struct drm_connector *connector;
7418 struct drm_connector_list_iter conn_iter;
7419 unsigned int used_ports = 0;
7420 unsigned int used_mst_ports = 0;
7424 * We're going to peek into connector->state,
7425 * hence connection_mutex must be held.
7427 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7430 * Walk the connector list instead of the encoder
7431 * list to detect the problem on ddi platforms
7432 * where there's just one encoder per digital port.
7434 drm_connector_list_iter_begin(dev, &conn_iter);
7435 drm_for_each_connector_iter(connector, &conn_iter) {
7436 struct drm_connector_state *connector_state;
7437 struct intel_encoder *encoder;
7440 drm_atomic_get_new_connector_state(&state->base,
7442 if (!connector_state)
7443 connector_state = connector->state;
7445 if (!connector_state->best_encoder)
7448 encoder = to_intel_encoder(connector_state->best_encoder);
7450 drm_WARN_ON(dev, !connector_state->crtc);
7452 switch (encoder->type) {
7453 case INTEL_OUTPUT_DDI:
7454 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7457 case INTEL_OUTPUT_DP:
7458 case INTEL_OUTPUT_HDMI:
7459 case INTEL_OUTPUT_EDP:
7460 /* the same port mustn't appear more than once */
7461 if (used_ports & BIT(encoder->port))
7464 used_ports |= BIT(encoder->port);
7466 case INTEL_OUTPUT_DP_MST:
7474 drm_connector_list_iter_end(&conn_iter);
7476 /* can't mix MST and SST/HDMI on the same port */
7477 if (used_ports & used_mst_ports)
7484 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7485 struct intel_crtc_state *crtc_state)
7487 const struct intel_crtc_state *from_crtc_state = crtc_state;
7489 if (crtc_state->bigjoiner_slave) {
7490 from_crtc_state = intel_atomic_get_new_crtc_state(state,
7491 crtc_state->bigjoiner_linked_crtc);
7493 /* No need to copy state if the master state is unchanged */
7494 if (!from_crtc_state)
7498 intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7502 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
7503 struct intel_crtc_state *crtc_state)
7505 crtc_state->hw.enable = crtc_state->uapi.enable;
7506 crtc_state->hw.active = crtc_state->uapi.active;
7507 crtc_state->hw.mode = crtc_state->uapi.mode;
7508 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
7509 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
7511 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
7514 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
7516 if (crtc_state->bigjoiner_slave)
7519 crtc_state->uapi.enable = crtc_state->hw.enable;
7520 crtc_state->uapi.active = crtc_state->hw.active;
7521 drm_WARN_ON(crtc_state->uapi.crtc->dev,
7522 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
7524 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
7525 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
7527 /* copy color blobs to uapi */
7528 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
7529 crtc_state->hw.degamma_lut);
7530 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
7531 crtc_state->hw.gamma_lut);
7532 drm_property_replace_blob(&crtc_state->uapi.ctm,
7533 crtc_state->hw.ctm);
7537 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
7538 const struct intel_crtc_state *from_crtc_state)
7540 struct intel_crtc_state *saved_state;
7541 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7543 saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
7547 saved_state->uapi = crtc_state->uapi;
7548 saved_state->scaler_state = crtc_state->scaler_state;
7549 saved_state->shared_dpll = crtc_state->shared_dpll;
7550 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7551 saved_state->crc_enabled = crtc_state->crc_enabled;
7553 intel_crtc_free_hw_state(crtc_state);
7554 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7557 /* Re-init hw state */
7558 memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
7559 crtc_state->hw.enable = from_crtc_state->hw.enable;
7560 crtc_state->hw.active = from_crtc_state->hw.active;
7561 crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
7562 crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
7565 crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
7566 crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
7567 crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
7568 crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
7569 crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
7570 crtc_state->bigjoiner_slave = true;
7571 crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
7572 crtc_state->has_audio = false;
7578 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
7579 struct intel_crtc_state *crtc_state)
7581 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7582 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7583 struct intel_crtc_state *saved_state;
7585 saved_state = intel_crtc_state_alloc(crtc);
7589 /* free the old crtc_state->hw members */
7590 intel_crtc_free_hw_state(crtc_state);
7592 /* FIXME: before the switch to atomic started, a new pipe_config was
7593 * kzalloc'd. Code that depends on any field being zero should be
7594 * fixed, so that the crtc_state can be safely duplicated. For now,
7595 * only fields that are know to not cause problems are preserved. */
7597 saved_state->uapi = crtc_state->uapi;
7598 saved_state->scaler_state = crtc_state->scaler_state;
7599 saved_state->shared_dpll = crtc_state->shared_dpll;
7600 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7601 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
7602 sizeof(saved_state->icl_port_dplls));
7603 saved_state->crc_enabled = crtc_state->crc_enabled;
7604 if (IS_G4X(dev_priv) ||
7605 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7606 saved_state->wm = crtc_state->wm;
7608 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7611 intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
7617 intel_modeset_pipe_config(struct intel_atomic_state *state,
7618 struct intel_crtc_state *pipe_config)
7620 struct drm_crtc *crtc = pipe_config->uapi.crtc;
7621 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7622 struct drm_connector *connector;
7623 struct drm_connector_state *connector_state;
7624 int base_bpp, ret, i;
7627 pipe_config->cpu_transcoder =
7628 (enum transcoder) to_intel_crtc(crtc)->pipe;
7631 * Sanitize sync polarity flags based on requested ones. If neither
7632 * positive or negative polarity is requested, treat this as meaning
7633 * negative polarity.
7635 if (!(pipe_config->hw.adjusted_mode.flags &
7636 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
7637 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
7639 if (!(pipe_config->hw.adjusted_mode.flags &
7640 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
7641 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
7643 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
7648 base_bpp = pipe_config->pipe_bpp;
7651 * Determine the real pipe dimensions. Note that stereo modes can
7652 * increase the actual pipe size due to the frame doubling and
7653 * insertion of additional space for blanks between the frame. This
7654 * is stored in the crtc timings. We use the requested mode to do this
7655 * computation to clearly distinguish it from the adjusted mode, which
7656 * can be changed by the connectors in the below retry loop.
7658 drm_mode_get_hv_timing(&pipe_config->hw.mode,
7659 &pipe_config->pipe_src_w,
7660 &pipe_config->pipe_src_h);
7662 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7663 struct intel_encoder *encoder =
7664 to_intel_encoder(connector_state->best_encoder);
7666 if (connector_state->crtc != crtc)
7669 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
7670 drm_dbg_kms(&i915->drm,
7671 "rejecting invalid cloning configuration\n");
7676 * Determine output_types before calling the .compute_config()
7677 * hooks so that the hooks can use this information safely.
7679 if (encoder->compute_output_type)
7680 pipe_config->output_types |=
7681 BIT(encoder->compute_output_type(encoder, pipe_config,
7684 pipe_config->output_types |= BIT(encoder->type);
7688 /* Ensure the port clock defaults are reset when retrying. */
7689 pipe_config->port_clock = 0;
7690 pipe_config->pixel_multiplier = 1;
7692 /* Fill in default crtc timings, allow encoders to overwrite them. */
7693 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
7694 CRTC_STEREO_DOUBLE);
7696 /* Pass our mode to the connectors and the CRTC to give them a chance to
7697 * adjust it according to limitations or connector properties, and also
7698 * a chance to reject the mode entirely.
7700 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7701 struct intel_encoder *encoder =
7702 to_intel_encoder(connector_state->best_encoder);
7704 if (connector_state->crtc != crtc)
7707 ret = encoder->compute_config(encoder, pipe_config,
7710 if (ret != -EDEADLK)
7711 drm_dbg_kms(&i915->drm,
7712 "Encoder config failure: %d\n",
7718 /* Set default port clock if not overwritten by the encoder. Needs to be
7719 * done afterwards in case the encoder adjusts the mode. */
7720 if (!pipe_config->port_clock)
7721 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
7722 * pipe_config->pixel_multiplier;
7724 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
7725 if (ret == -EDEADLK)
7728 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
7732 if (ret == I915_DISPLAY_CONFIG_RETRY) {
7733 if (drm_WARN(&i915->drm, !retry,
7734 "loop in pipe configuration computation\n"))
7737 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
7742 /* Dithering seems to not pass-through bits correctly when it should, so
7743 * only enable it on 6bpc panels and when its not a compliance
7744 * test requesting 6bpc video pattern.
7746 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
7747 !pipe_config->dither_force_disable;
7748 drm_dbg_kms(&i915->drm,
7749 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
7750 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
7756 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7758 struct intel_atomic_state *state =
7759 to_intel_atomic_state(crtc_state->uapi.state);
7760 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7761 struct drm_connector_state *conn_state;
7762 struct drm_connector *connector;
7765 for_each_new_connector_in_state(&state->base, connector,
7767 struct intel_encoder *encoder =
7768 to_intel_encoder(conn_state->best_encoder);
7771 if (conn_state->crtc != &crtc->base ||
7772 !encoder->compute_config_late)
7775 ret = encoder->compute_config_late(encoder, crtc_state,
7784 bool intel_fuzzy_clock_check(int clock1, int clock2)
7788 if (clock1 == clock2)
7791 if (!clock1 || !clock2)
7794 diff = abs(clock1 - clock2);
7796 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
7803 intel_compare_m_n(unsigned int m, unsigned int n,
7804 unsigned int m2, unsigned int n2,
7807 if (m == m2 && n == n2)
7810 if (exact || !m || !n || !m2 || !n2)
7813 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
7820 } else if (n < n2) {
7830 return intel_fuzzy_clock_check(m, m2);
7834 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
7835 const struct intel_link_m_n *m2_n2,
7838 return m_n->tu == m2_n2->tu &&
7839 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
7840 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
7841 intel_compare_m_n(m_n->link_m, m_n->link_n,
7842 m2_n2->link_m, m2_n2->link_n, exact);
7846 intel_compare_infoframe(const union hdmi_infoframe *a,
7847 const union hdmi_infoframe *b)
7849 return memcmp(a, b, sizeof(*a)) == 0;
7853 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
7854 const struct drm_dp_vsc_sdp *b)
7856 return memcmp(a, b, sizeof(*a)) == 0;
7860 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
7861 bool fastset, const char *name,
7862 const union hdmi_infoframe *a,
7863 const union hdmi_infoframe *b)
7866 if (!drm_debug_enabled(DRM_UT_KMS))
7869 drm_dbg_kms(&dev_priv->drm,
7870 "fastset mismatch in %s infoframe\n", name);
7871 drm_dbg_kms(&dev_priv->drm, "expected:\n");
7872 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
7873 drm_dbg_kms(&dev_priv->drm, "found:\n");
7874 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
7876 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
7877 drm_err(&dev_priv->drm, "expected:\n");
7878 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
7879 drm_err(&dev_priv->drm, "found:\n");
7880 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
7885 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
7886 bool fastset, const char *name,
7887 const struct drm_dp_vsc_sdp *a,
7888 const struct drm_dp_vsc_sdp *b)
7891 if (!drm_debug_enabled(DRM_UT_KMS))
7894 drm_dbg_kms(&dev_priv->drm,
7895 "fastset mismatch in %s dp sdp\n", name);
7896 drm_dbg_kms(&dev_priv->drm, "expected:\n");
7897 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
7898 drm_dbg_kms(&dev_priv->drm, "found:\n");
7899 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
7901 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
7902 drm_err(&dev_priv->drm, "expected:\n");
7903 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
7904 drm_err(&dev_priv->drm, "found:\n");
7905 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
7909 static void __printf(4, 5)
7910 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
7911 const char *name, const char *format, ...)
7913 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7914 struct va_format vaf;
7917 va_start(args, format);
7922 drm_dbg_kms(&i915->drm,
7923 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
7924 crtc->base.base.id, crtc->base.name, name, &vaf);
7926 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
7927 crtc->base.base.id, crtc->base.name, name, &vaf);
7932 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
7934 if (dev_priv->params.fastboot != -1)
7935 return dev_priv->params.fastboot;
7937 /* Enable fastboot by default on Skylake and newer */
7938 if (DISPLAY_VER(dev_priv) >= 9)
7941 /* Enable fastboot by default on VLV and CHV */
7942 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7945 /* Disabled by default on all others */
7950 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
7951 const struct intel_crtc_state *pipe_config,
7954 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
7955 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7958 bool fixup_inherited = fastset &&
7959 current_config->inherited && !pipe_config->inherited;
7961 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
7962 drm_dbg_kms(&dev_priv->drm,
7963 "initial modeset and fastboot not set\n");
7967 #define PIPE_CONF_CHECK_X(name) do { \
7968 if (current_config->name != pipe_config->name) { \
7969 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7970 "(expected 0x%08x, found 0x%08x)", \
7971 current_config->name, \
7972 pipe_config->name); \
7977 #define PIPE_CONF_CHECK_I(name) do { \
7978 if (current_config->name != pipe_config->name) { \
7979 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7980 "(expected %i, found %i)", \
7981 current_config->name, \
7982 pipe_config->name); \
7987 #define PIPE_CONF_CHECK_BOOL(name) do { \
7988 if (current_config->name != pipe_config->name) { \
7989 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7990 "(expected %s, found %s)", \
7991 yesno(current_config->name), \
7992 yesno(pipe_config->name)); \
7998 * Checks state where we only read out the enabling, but not the entire
7999 * state itself (like full infoframes or ELD for audio). These states
8000 * require a full modeset on bootup to fix up.
8002 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8003 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8004 PIPE_CONF_CHECK_BOOL(name); \
8006 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8007 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8008 yesno(current_config->name), \
8009 yesno(pipe_config->name)); \
8014 #define PIPE_CONF_CHECK_P(name) do { \
8015 if (current_config->name != pipe_config->name) { \
8016 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8017 "(expected %p, found %p)", \
8018 current_config->name, \
8019 pipe_config->name); \
8024 #define PIPE_CONF_CHECK_M_N(name) do { \
8025 if (!intel_compare_link_m_n(¤t_config->name, \
8026 &pipe_config->name,\
8028 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8029 "(expected tu %i gmch %i/%i link %i/%i, " \
8030 "found tu %i, gmch %i/%i link %i/%i)", \
8031 current_config->name.tu, \
8032 current_config->name.gmch_m, \
8033 current_config->name.gmch_n, \
8034 current_config->name.link_m, \
8035 current_config->name.link_n, \
8036 pipe_config->name.tu, \
8037 pipe_config->name.gmch_m, \
8038 pipe_config->name.gmch_n, \
8039 pipe_config->name.link_m, \
8040 pipe_config->name.link_n); \
8045 /* This is required for BDW+ where there is only one set of registers for
8046 * switching between high and low RR.
8047 * This macro can be used whenever a comparison has to be made between one
8048 * hw state and multiple sw state variables.
8050 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8051 if (!intel_compare_link_m_n(¤t_config->name, \
8052 &pipe_config->name, !fastset) && \
8053 !intel_compare_link_m_n(¤t_config->alt_name, \
8054 &pipe_config->name, !fastset)) { \
8055 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8056 "(expected tu %i gmch %i/%i link %i/%i, " \
8057 "or tu %i gmch %i/%i link %i/%i, " \
8058 "found tu %i, gmch %i/%i link %i/%i)", \
8059 current_config->name.tu, \
8060 current_config->name.gmch_m, \
8061 current_config->name.gmch_n, \
8062 current_config->name.link_m, \
8063 current_config->name.link_n, \
8064 current_config->alt_name.tu, \
8065 current_config->alt_name.gmch_m, \
8066 current_config->alt_name.gmch_n, \
8067 current_config->alt_name.link_m, \
8068 current_config->alt_name.link_n, \
8069 pipe_config->name.tu, \
8070 pipe_config->name.gmch_m, \
8071 pipe_config->name.gmch_n, \
8072 pipe_config->name.link_m, \
8073 pipe_config->name.link_n); \
8078 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8079 if ((current_config->name ^ pipe_config->name) & (mask)) { \
8080 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8081 "(%x) (expected %i, found %i)", \
8083 current_config->name & (mask), \
8084 pipe_config->name & (mask)); \
8089 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8090 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8091 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8092 "(expected %i, found %i)", \
8093 current_config->name, \
8094 pipe_config->name); \
8099 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8100 if (!intel_compare_infoframe(¤t_config->infoframes.name, \
8101 &pipe_config->infoframes.name)) { \
8102 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8103 ¤t_config->infoframes.name, \
8104 &pipe_config->infoframes.name); \
8109 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8110 if (!current_config->has_psr && !pipe_config->has_psr && \
8111 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \
8112 &pipe_config->infoframes.name)) { \
8113 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8114 ¤t_config->infoframes.name, \
8115 &pipe_config->infoframes.name); \
8120 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8121 if (current_config->name1 != pipe_config->name1) { \
8122 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8123 "(expected %i, found %i, won't compare lut values)", \
8124 current_config->name1, \
8125 pipe_config->name1); \
8128 if (!intel_color_lut_equal(current_config->name2, \
8129 pipe_config->name2, pipe_config->name1, \
8131 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8132 "hw_state doesn't match sw_state"); \
8138 #define PIPE_CONF_QUIRK(quirk) \
8139 ((current_config->quirks | pipe_config->quirks) & (quirk))
8141 PIPE_CONF_CHECK_I(cpu_transcoder);
8143 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8144 PIPE_CONF_CHECK_I(fdi_lanes);
8145 PIPE_CONF_CHECK_M_N(fdi_m_n);
8147 PIPE_CONF_CHECK_I(lane_count);
8148 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8150 if (DISPLAY_VER(dev_priv) < 8) {
8151 PIPE_CONF_CHECK_M_N(dp_m_n);
8153 if (current_config->has_drrs)
8154 PIPE_CONF_CHECK_M_N(dp_m2_n2);
8156 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8158 PIPE_CONF_CHECK_X(output_types);
8160 /* FIXME do the readout properly and get rid of this quirk */
8161 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8162 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8163 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8164 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8165 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8166 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8167 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8169 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8170 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8171 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8172 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8173 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8174 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8176 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8177 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8178 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8179 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8180 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8181 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8183 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8184 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8185 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8186 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8187 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8188 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8190 PIPE_CONF_CHECK_I(pixel_multiplier);
8192 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8193 DRM_MODE_FLAG_INTERLACE);
8195 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8196 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8197 DRM_MODE_FLAG_PHSYNC);
8198 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8199 DRM_MODE_FLAG_NHSYNC);
8200 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8201 DRM_MODE_FLAG_PVSYNC);
8202 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8203 DRM_MODE_FLAG_NVSYNC);
8207 PIPE_CONF_CHECK_I(output_format);
8208 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8209 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8210 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8211 PIPE_CONF_CHECK_BOOL(limited_color_range);
8213 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8214 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8215 PIPE_CONF_CHECK_BOOL(has_infoframe);
8216 /* FIXME do the readout properly and get rid of this quirk */
8217 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8218 PIPE_CONF_CHECK_BOOL(fec_enable);
8220 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8222 PIPE_CONF_CHECK_X(gmch_pfit.control);
8223 /* pfit ratios are autocomputed by the hw on gen4+ */
8224 if (DISPLAY_VER(dev_priv) < 4)
8225 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8226 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8229 * Changing the EDP transcoder input mux
8230 * (A_ONOFF vs. A_ON) requires a full modeset.
8232 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8235 PIPE_CONF_CHECK_I(pipe_src_w);
8236 PIPE_CONF_CHECK_I(pipe_src_h);
8238 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8239 if (current_config->pch_pfit.enabled) {
8240 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8241 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8242 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8243 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8246 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8247 /* FIXME do the readout properly and get rid of this quirk */
8248 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8249 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8251 PIPE_CONF_CHECK_X(gamma_mode);
8252 if (IS_CHERRYVIEW(dev_priv))
8253 PIPE_CONF_CHECK_X(cgm_mode);
8255 PIPE_CONF_CHECK_X(csc_mode);
8256 PIPE_CONF_CHECK_BOOL(gamma_enable);
8257 PIPE_CONF_CHECK_BOOL(csc_enable);
8259 PIPE_CONF_CHECK_I(linetime);
8260 PIPE_CONF_CHECK_I(ips_linetime);
8262 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8264 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8267 PIPE_CONF_CHECK_BOOL(double_wide);
8269 PIPE_CONF_CHECK_P(shared_dpll);
8271 /* FIXME do the readout properly and get rid of this quirk */
8272 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8273 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8274 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8275 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8276 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8277 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8278 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8279 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8280 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8281 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8282 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8283 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8284 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8285 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8286 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8287 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8288 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8289 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8290 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8291 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8292 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8293 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8294 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8295 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8296 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8297 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8298 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8299 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8300 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8301 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8302 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8303 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8305 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8306 PIPE_CONF_CHECK_X(dsi_pll.div);
8308 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8309 PIPE_CONF_CHECK_I(pipe_bpp);
8311 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8312 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8313 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8315 PIPE_CONF_CHECK_I(min_voltage_level);
8318 PIPE_CONF_CHECK_X(infoframes.enable);
8319 PIPE_CONF_CHECK_X(infoframes.gcp);
8320 PIPE_CONF_CHECK_INFOFRAME(avi);
8321 PIPE_CONF_CHECK_INFOFRAME(spd);
8322 PIPE_CONF_CHECK_INFOFRAME(hdmi);
8323 PIPE_CONF_CHECK_INFOFRAME(drm);
8324 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8326 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8327 PIPE_CONF_CHECK_I(master_transcoder);
8328 PIPE_CONF_CHECK_BOOL(bigjoiner);
8329 PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8330 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8332 PIPE_CONF_CHECK_I(dsc.compression_enable);
8333 PIPE_CONF_CHECK_I(dsc.dsc_split);
8334 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8336 PIPE_CONF_CHECK_BOOL(splitter.enable);
8337 PIPE_CONF_CHECK_I(splitter.link_count);
8338 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8340 PIPE_CONF_CHECK_I(mst_master_transcoder);
8342 PIPE_CONF_CHECK_BOOL(vrr.enable);
8343 PIPE_CONF_CHECK_I(vrr.vmin);
8344 PIPE_CONF_CHECK_I(vrr.vmax);
8345 PIPE_CONF_CHECK_I(vrr.flipline);
8346 PIPE_CONF_CHECK_I(vrr.pipeline_full);
8348 #undef PIPE_CONF_CHECK_X
8349 #undef PIPE_CONF_CHECK_I
8350 #undef PIPE_CONF_CHECK_BOOL
8351 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8352 #undef PIPE_CONF_CHECK_P
8353 #undef PIPE_CONF_CHECK_FLAGS
8354 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8355 #undef PIPE_CONF_CHECK_COLOR_LUT
8356 #undef PIPE_CONF_QUIRK
8361 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8362 const struct intel_crtc_state *pipe_config)
8364 if (pipe_config->has_pch_encoder) {
8365 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8366 &pipe_config->fdi_m_n);
8367 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8370 * FDI already provided one idea for the dotclock.
8371 * Yell if the encoder disagrees.
8373 drm_WARN(&dev_priv->drm,
8374 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8375 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8376 fdi_dotclock, dotclock);
8380 static void verify_wm_state(struct intel_crtc *crtc,
8381 struct intel_crtc_state *new_crtc_state)
8383 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8384 struct skl_hw_state {
8385 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
8386 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
8387 struct skl_pipe_wm wm;
8389 const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
8390 int level, max_level = ilk_wm_max_level(dev_priv);
8391 struct intel_plane *plane;
8392 u8 hw_enabled_slices;
8394 if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
8397 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
8401 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
8403 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
8405 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
8407 if (DISPLAY_VER(dev_priv) >= 11 &&
8408 hw_enabled_slices != dev_priv->dbuf.enabled_slices)
8409 drm_err(&dev_priv->drm,
8410 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
8411 dev_priv->dbuf.enabled_slices,
8414 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8415 const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
8416 const struct skl_wm_level *hw_wm_level, *sw_wm_level;
8419 for (level = 0; level <= max_level; level++) {
8420 hw_wm_level = &hw->wm.planes[plane->id].wm[level];
8421 sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
8423 if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
8426 drm_err(&dev_priv->drm,
8427 "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8428 plane->base.base.id, plane->base.name, level,
8429 sw_wm_level->enable,
8430 sw_wm_level->blocks,
8432 hw_wm_level->enable,
8433 hw_wm_level->blocks,
8434 hw_wm_level->lines);
8437 hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
8438 sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
8440 if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8441 drm_err(&dev_priv->drm,
8442 "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8443 plane->base.base.id, plane->base.name,
8444 sw_wm_level->enable,
8445 sw_wm_level->blocks,
8447 hw_wm_level->enable,
8448 hw_wm_level->blocks,
8449 hw_wm_level->lines);
8453 hw_ddb_entry = &hw->ddb_y[plane->id];
8454 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
8456 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
8457 drm_err(&dev_priv->drm,
8458 "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
8459 plane->base.base.id, plane->base.name,
8460 sw_ddb_entry->start, sw_ddb_entry->end,
8461 hw_ddb_entry->start, hw_ddb_entry->end);
8469 verify_connector_state(struct intel_atomic_state *state,
8470 struct intel_crtc *crtc)
8472 struct drm_connector *connector;
8473 struct drm_connector_state *new_conn_state;
8476 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8477 struct drm_encoder *encoder = connector->encoder;
8478 struct intel_crtc_state *crtc_state = NULL;
8480 if (new_conn_state->crtc != &crtc->base)
8484 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8486 intel_connector_verify_state(crtc_state, new_conn_state);
8488 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8489 "connector's atomic encoder doesn't match legacy encoder\n");
8494 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8496 struct intel_encoder *encoder;
8497 struct drm_connector *connector;
8498 struct drm_connector_state *old_conn_state, *new_conn_state;
8501 for_each_intel_encoder(&dev_priv->drm, encoder) {
8502 bool enabled = false, found = false;
8505 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
8506 encoder->base.base.id,
8507 encoder->base.name);
8509 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
8510 new_conn_state, i) {
8511 if (old_conn_state->best_encoder == &encoder->base)
8514 if (new_conn_state->best_encoder != &encoder->base)
8516 found = enabled = true;
8518 I915_STATE_WARN(new_conn_state->crtc !=
8520 "connector's crtc doesn't match encoder crtc\n");
8526 I915_STATE_WARN(!!encoder->base.crtc != enabled,
8527 "encoder's enabled state mismatch "
8528 "(expected %i, found %i)\n",
8529 !!encoder->base.crtc, enabled);
8531 if (!encoder->base.crtc) {
8534 active = encoder->get_hw_state(encoder, &pipe);
8535 I915_STATE_WARN(active,
8536 "encoder detached but still enabled on pipe %c.\n",
8543 verify_crtc_state(struct intel_crtc *crtc,
8544 struct intel_crtc_state *old_crtc_state,
8545 struct intel_crtc_state *new_crtc_state)
8547 struct drm_device *dev = crtc->base.dev;
8548 struct drm_i915_private *dev_priv = to_i915(dev);
8549 struct intel_encoder *encoder;
8550 struct intel_crtc_state *pipe_config = old_crtc_state;
8551 struct drm_atomic_state *state = old_crtc_state->uapi.state;
8552 struct intel_crtc *master = crtc;
8554 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
8555 intel_crtc_free_hw_state(old_crtc_state);
8556 intel_crtc_state_reset(old_crtc_state, crtc);
8557 old_crtc_state->uapi.state = state;
8559 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
8562 pipe_config->hw.enable = new_crtc_state->hw.enable;
8564 intel_crtc_get_pipe_config(pipe_config);
8566 /* we keep both pipes enabled on 830 */
8567 if (IS_I830(dev_priv) && pipe_config->hw.active)
8568 pipe_config->hw.active = new_crtc_state->hw.active;
8570 I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
8571 "crtc active state doesn't match with hw state "
8572 "(expected %i, found %i)\n",
8573 new_crtc_state->hw.active, pipe_config->hw.active);
8575 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
8576 "transitional active state does not match atomic hw state "
8577 "(expected %i, found %i)\n",
8578 new_crtc_state->hw.active, crtc->active);
8580 if (new_crtc_state->bigjoiner_slave)
8581 master = new_crtc_state->bigjoiner_linked_crtc;
8583 for_each_encoder_on_crtc(dev, &master->base, encoder) {
8587 active = encoder->get_hw_state(encoder, &pipe);
8588 I915_STATE_WARN(active != new_crtc_state->hw.active,
8589 "[ENCODER:%i] active %i with crtc active %i\n",
8590 encoder->base.base.id, active,
8591 new_crtc_state->hw.active);
8593 I915_STATE_WARN(active && master->pipe != pipe,
8594 "Encoder connected to wrong pipe %c\n",
8598 intel_encoder_get_config(encoder, pipe_config);
8601 if (!new_crtc_state->hw.active)
8604 intel_pipe_config_sanity_check(dev_priv, pipe_config);
8606 if (!intel_pipe_config_compare(new_crtc_state,
8607 pipe_config, false)) {
8608 I915_STATE_WARN(1, "pipe state doesn't match!\n");
8609 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
8610 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
8615 intel_verify_planes(struct intel_atomic_state *state)
8617 struct intel_plane *plane;
8618 const struct intel_plane_state *plane_state;
8621 for_each_new_intel_plane_in_state(state, plane,
8623 assert_plane(plane, plane_state->planar_slave ||
8624 plane_state->uapi.visible);
8628 verify_single_dpll_state(struct drm_i915_private *dev_priv,
8629 struct intel_shared_dpll *pll,
8630 struct intel_crtc *crtc,
8631 struct intel_crtc_state *new_crtc_state)
8633 struct intel_dpll_hw_state dpll_hw_state;
8637 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
8639 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
8641 active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
8643 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
8644 I915_STATE_WARN(!pll->on && pll->active_mask,
8645 "pll in active use but not on in sw tracking\n");
8646 I915_STATE_WARN(pll->on && !pll->active_mask,
8647 "pll is on but not used by any active pipe\n");
8648 I915_STATE_WARN(pll->on != active,
8649 "pll on state mismatch (expected %i, found %i)\n",
8654 I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
8655 "more active pll users than references: 0x%x vs 0x%x\n",
8656 pll->active_mask, pll->state.pipe_mask);
8661 pipe_mask = BIT(crtc->pipe);
8663 if (new_crtc_state->hw.active)
8664 I915_STATE_WARN(!(pll->active_mask & pipe_mask),
8665 "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
8666 pipe_name(crtc->pipe), pll->active_mask);
8668 I915_STATE_WARN(pll->active_mask & pipe_mask,
8669 "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
8670 pipe_name(crtc->pipe), pll->active_mask);
8672 I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
8673 "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
8674 pipe_mask, pll->state.pipe_mask);
8676 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
8678 sizeof(dpll_hw_state)),
8679 "pll hw state mismatch\n");
8683 verify_shared_dpll_state(struct intel_crtc *crtc,
8684 struct intel_crtc_state *old_crtc_state,
8685 struct intel_crtc_state *new_crtc_state)
8687 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8689 if (new_crtc_state->shared_dpll)
8690 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8692 if (old_crtc_state->shared_dpll &&
8693 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8694 u8 pipe_mask = BIT(crtc->pipe);
8695 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8697 I915_STATE_WARN(pll->active_mask & pipe_mask,
8698 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
8699 pipe_name(crtc->pipe), pll->active_mask);
8700 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
8701 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
8702 pipe_name(crtc->pipe), pll->state.pipe_mask);
8707 intel_modeset_verify_crtc(struct intel_crtc *crtc,
8708 struct intel_atomic_state *state,
8709 struct intel_crtc_state *old_crtc_state,
8710 struct intel_crtc_state *new_crtc_state)
8712 if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
8715 verify_wm_state(crtc, new_crtc_state);
8716 verify_connector_state(state, crtc);
8717 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
8718 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
8722 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
8726 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
8727 verify_single_dpll_state(dev_priv,
8728 &dev_priv->dpll.shared_dplls[i],
8733 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
8734 struct intel_atomic_state *state)
8736 verify_encoder_state(dev_priv, state);
8737 verify_connector_state(state, NULL);
8738 verify_disabled_dpll_state(dev_priv);
8742 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
8744 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8745 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8746 struct drm_display_mode adjusted_mode =
8747 crtc_state->hw.adjusted_mode;
8749 if (crtc_state->vrr.enable) {
8750 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
8751 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
8752 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
8753 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
8756 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
8758 crtc->mode_flags = crtc_state->mode_flags;
8761 * The scanline counter increments at the leading edge of hsync.
8763 * On most platforms it starts counting from vtotal-1 on the
8764 * first active line. That means the scanline counter value is
8765 * always one less than what we would expect. Ie. just after
8766 * start of vblank, which also occurs at start of hsync (on the
8767 * last active line), the scanline counter will read vblank_start-1.
8769 * On gen2 the scanline counter starts counting from 1 instead
8770 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
8771 * to keep the value positive), instead of adding one.
8773 * On HSW+ the behaviour of the scanline counter depends on the output
8774 * type. For DP ports it behaves like most other platforms, but on HDMI
8775 * there's an extra 1 line difference. So we need to add two instead of
8778 * On VLV/CHV DSI the scanline counter would appear to increment
8779 * approx. 1/3 of a scanline before start of vblank. Unfortunately
8780 * that means we can't tell whether we're in vblank or not while
8781 * we're on that particular line. We must still set scanline_offset
8782 * to 1 so that the vblank timestamps come out correct when we query
8783 * the scanline counter from within the vblank interrupt handler.
8784 * However if queried just before the start of vblank we'll get an
8785 * answer that's slightly in the future.
8787 if (DISPLAY_VER(dev_priv) == 2) {
8790 vtotal = adjusted_mode.crtc_vtotal;
8791 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8794 crtc->scanline_offset = vtotal - 1;
8795 } else if (HAS_DDI(dev_priv) &&
8796 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
8797 crtc->scanline_offset = 2;
8799 crtc->scanline_offset = 1;
8803 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
8805 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8806 struct intel_crtc_state *new_crtc_state;
8807 struct intel_crtc *crtc;
8810 if (!dev_priv->display.crtc_compute_clock)
8813 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8814 if (!intel_crtc_needs_modeset(new_crtc_state))
8817 intel_release_shared_dplls(state, crtc);
8822 * This implements the workaround described in the "notes" section of the mode
8823 * set sequence documentation. When going from no pipes or single pipe to
8824 * multiple pipes, and planes are enabled after the pipe, we need to wait at
8825 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
8827 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
8829 struct intel_crtc_state *crtc_state;
8830 struct intel_crtc *crtc;
8831 struct intel_crtc_state *first_crtc_state = NULL;
8832 struct intel_crtc_state *other_crtc_state = NULL;
8833 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
8836 /* look at all crtc's that are going to be enabled in during modeset */
8837 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8838 if (!crtc_state->hw.active ||
8839 !intel_crtc_needs_modeset(crtc_state))
8842 if (first_crtc_state) {
8843 other_crtc_state = crtc_state;
8846 first_crtc_state = crtc_state;
8847 first_pipe = crtc->pipe;
8851 /* No workaround needed? */
8852 if (!first_crtc_state)
8855 /* w/a possibly needed, check how many crtc's are already enabled. */
8856 for_each_intel_crtc(state->base.dev, crtc) {
8857 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
8858 if (IS_ERR(crtc_state))
8859 return PTR_ERR(crtc_state);
8861 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
8863 if (!crtc_state->hw.active ||
8864 intel_crtc_needs_modeset(crtc_state))
8867 /* 2 or more enabled crtcs means no need for w/a */
8868 if (enabled_pipe != INVALID_PIPE)
8871 enabled_pipe = crtc->pipe;
8874 if (enabled_pipe != INVALID_PIPE)
8875 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
8876 else if (other_crtc_state)
8877 other_crtc_state->hsw_workaround_pipe = first_pipe;
8882 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
8885 const struct intel_crtc_state *crtc_state;
8886 struct intel_crtc *crtc;
8889 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8890 if (crtc_state->hw.active)
8891 active_pipes |= BIT(crtc->pipe);
8893 active_pipes &= ~BIT(crtc->pipe);
8896 return active_pipes;
8899 static int intel_modeset_checks(struct intel_atomic_state *state)
8901 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8903 state->modeset = true;
8905 if (IS_HASWELL(dev_priv))
8906 return hsw_mode_set_planes_workaround(state);
8912 * Handle calculation of various watermark data at the end of the atomic check
8913 * phase. The code here should be run after the per-crtc and per-plane 'check'
8914 * handlers to ensure that all derived state has been updated.
8916 static int calc_watermark_data(struct intel_atomic_state *state)
8918 struct drm_device *dev = state->base.dev;
8919 struct drm_i915_private *dev_priv = to_i915(dev);
8921 /* Is there platform-specific watermark information to calculate? */
8922 if (dev_priv->display.compute_global_watermarks)
8923 return dev_priv->display.compute_global_watermarks(state);
8928 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
8929 struct intel_crtc_state *new_crtc_state)
8931 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
8934 new_crtc_state->uapi.mode_changed = false;
8935 new_crtc_state->update_pipe = true;
8938 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
8939 struct intel_crtc_state *new_crtc_state)
8942 * If we're not doing the full modeset we want to
8943 * keep the current M/N values as they may be
8944 * sufficiently different to the computed values
8945 * to cause problems.
8947 * FIXME: should really copy more fuzzy state here
8949 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
8950 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
8951 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
8952 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
8955 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
8956 struct intel_crtc *crtc,
8959 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8960 struct intel_plane *plane;
8962 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8963 struct intel_plane_state *plane_state;
8965 if ((plane_ids_mask & BIT(plane->id)) == 0)
8968 plane_state = intel_atomic_get_plane_state(state, plane);
8969 if (IS_ERR(plane_state))
8970 return PTR_ERR(plane_state);
8976 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
8977 struct intel_crtc *crtc)
8979 const struct intel_crtc_state *old_crtc_state =
8980 intel_atomic_get_old_crtc_state(state, crtc);
8981 const struct intel_crtc_state *new_crtc_state =
8982 intel_atomic_get_new_crtc_state(state, crtc);
8984 return intel_crtc_add_planes_to_state(state, crtc,
8985 old_crtc_state->enabled_planes |
8986 new_crtc_state->enabled_planes);
8989 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
8991 /* See {hsw,vlv,ivb}_plane_ratio() */
8992 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
8993 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8994 IS_IVYBRIDGE(dev_priv);
8997 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
8998 struct intel_crtc *crtc,
8999 struct intel_crtc *other)
9001 const struct intel_plane_state *plane_state;
9002 struct intel_plane *plane;
9006 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9007 if (plane->pipe == crtc->pipe)
9008 plane_ids |= BIT(plane->id);
9011 return intel_crtc_add_planes_to_state(state, other, plane_ids);
9014 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9016 const struct intel_crtc_state *crtc_state;
9017 struct intel_crtc *crtc;
9020 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9023 if (!crtc_state->bigjoiner)
9026 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9027 crtc_state->bigjoiner_linked_crtc);
9035 static int intel_atomic_check_planes(struct intel_atomic_state *state)
9037 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9038 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9039 struct intel_plane_state *plane_state;
9040 struct intel_plane *plane;
9041 struct intel_crtc *crtc;
9044 ret = icl_add_linked_planes(state);
9048 ret = intel_bigjoiner_add_affected_planes(state);
9052 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9053 ret = intel_plane_atomic_check(state, plane);
9055 drm_dbg_atomic(&dev_priv->drm,
9056 "[PLANE:%d:%s] atomic driver check failed\n",
9057 plane->base.base.id, plane->base.name);
9062 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9063 new_crtc_state, i) {
9064 u8 old_active_planes, new_active_planes;
9066 ret = icl_check_nv12_planes(new_crtc_state);
9071 * On some platforms the number of active planes affects
9072 * the planes' minimum cdclk calculation. Add such planes
9073 * to the state before we compute the minimum cdclk.
9075 if (!active_planes_affects_min_cdclk(dev_priv))
9078 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9079 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9081 if (hweight8(old_active_planes) == hweight8(new_active_planes))
9084 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
9092 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9093 bool *need_cdclk_calc)
9095 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9096 const struct intel_cdclk_state *old_cdclk_state;
9097 const struct intel_cdclk_state *new_cdclk_state;
9098 struct intel_plane_state *plane_state;
9099 struct intel_bw_state *new_bw_state;
9100 struct intel_plane *plane;
9106 * active_planes bitmask has been updated, and potentially
9107 * affected planes are part of the state. We can now
9108 * compute the minimum cdclk for each plane.
9110 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9111 ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9116 old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9117 new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9119 if (new_cdclk_state &&
9120 old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9121 *need_cdclk_calc = true;
9123 ret = dev_priv->display.bw_calc_min_cdclk(state);
9127 new_bw_state = intel_atomic_get_new_bw_state(state);
9129 if (!new_cdclk_state || !new_bw_state)
9132 for_each_pipe(dev_priv, pipe) {
9133 min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9136 * Currently do this change only if we need to increase
9138 if (new_bw_state->min_cdclk > min_cdclk)
9139 *need_cdclk_calc = true;
9145 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9147 struct intel_crtc_state *crtc_state;
9148 struct intel_crtc *crtc;
9151 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9152 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9155 ret = intel_crtc_atomic_check(state, crtc);
9157 drm_dbg_atomic(&i915->drm,
9158 "[CRTC:%d:%s] atomic driver check failed\n",
9159 crtc->base.base.id, crtc->base.name);
9167 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9170 const struct intel_crtc_state *new_crtc_state;
9171 struct intel_crtc *crtc;
9174 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9175 if (new_crtc_state->hw.enable &&
9176 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9177 intel_crtc_needs_modeset(new_crtc_state))
9184 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9185 struct intel_crtc *crtc,
9186 struct intel_crtc_state *old_crtc_state,
9187 struct intel_crtc_state *new_crtc_state)
9189 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9190 struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9191 struct intel_crtc *slave, *master;
9193 /* slave being enabled, is master is still claiming this crtc? */
9194 if (old_crtc_state->bigjoiner_slave) {
9196 master = old_crtc_state->bigjoiner_linked_crtc;
9197 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9198 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9202 if (!new_crtc_state->bigjoiner)
9205 if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
9206 DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
9207 "CRTC + 1 to be used, doesn't exist\n",
9208 crtc->base.base.id, crtc->base.name);
9212 slave = new_crtc_state->bigjoiner_linked_crtc =
9213 intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
9214 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9216 if (IS_ERR(slave_crtc_state))
9217 return PTR_ERR(slave_crtc_state);
9219 /* master being enabled, slave was already configured? */
9220 if (slave_crtc_state->uapi.enable)
9223 DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9224 slave->base.base.id, slave->base.name);
9226 return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9229 DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
9230 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
9231 slave->base.base.id, slave->base.name,
9232 master->base.base.id, master->base.name);
9236 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9237 struct intel_crtc_state *master_crtc_state)
9239 struct intel_crtc_state *slave_crtc_state =
9240 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9242 slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9243 slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9244 slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9245 intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9249 * DOC: asynchronous flip implementation
9251 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9252 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9253 * Correspondingly, support is currently added for primary plane only.
9255 * Async flip can only change the plane surface address, so anything else
9256 * changing is rejected from the intel_atomic_check_async() function.
9257 * Once this check is cleared, flip done interrupt is enabled using
9258 * the intel_crtc_enable_flip_done() function.
9260 * As soon as the surface address register is written, flip done interrupt is
9261 * generated and the requested events are sent to the usersapce in the interrupt
9262 * handler itself. The timestamp and sequence sent during the flip done event
9263 * correspond to the last vblank and have no relation to the actual time when
9264 * the flip done event was sent.
9266 static int intel_atomic_check_async(struct intel_atomic_state *state)
9268 struct drm_i915_private *i915 = to_i915(state->base.dev);
9269 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9270 const struct intel_plane_state *new_plane_state, *old_plane_state;
9271 struct intel_crtc *crtc;
9272 struct intel_plane *plane;
9275 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9276 new_crtc_state, i) {
9277 if (intel_crtc_needs_modeset(new_crtc_state)) {
9278 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
9282 if (!new_crtc_state->hw.active) {
9283 drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9286 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9287 drm_dbg_kms(&i915->drm,
9288 "Active planes cannot be changed during async flip\n");
9293 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9294 new_plane_state, i) {
9296 * TODO: Async flip is only supported through the page flip IOCTL
9297 * as of now. So support currently added for primary plane only.
9298 * Support for other planes on platforms on which supports
9299 * this(vlv/chv and icl+) should be added when async flip is
9300 * enabled in the atomic IOCTL path.
9302 if (!plane->async_flip)
9306 * FIXME: This check is kept generic for all platforms.
9307 * Need to verify this for all gen9 and gen10 platforms to enable
9308 * this selectively if required.
9310 switch (new_plane_state->hw.fb->modifier) {
9311 case I915_FORMAT_MOD_X_TILED:
9312 case I915_FORMAT_MOD_Y_TILED:
9313 case I915_FORMAT_MOD_Yf_TILED:
9316 drm_dbg_kms(&i915->drm,
9317 "Linear memory/CCS does not support async flips\n");
9321 if (old_plane_state->view.color_plane[0].stride !=
9322 new_plane_state->view.color_plane[0].stride) {
9323 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9327 if (old_plane_state->hw.fb->modifier !=
9328 new_plane_state->hw.fb->modifier) {
9329 drm_dbg_kms(&i915->drm,
9330 "Framebuffer modifiers cannot be changed in async flip\n");
9334 if (old_plane_state->hw.fb->format !=
9335 new_plane_state->hw.fb->format) {
9336 drm_dbg_kms(&i915->drm,
9337 "Framebuffer format cannot be changed in async flip\n");
9341 if (old_plane_state->hw.rotation !=
9342 new_plane_state->hw.rotation) {
9343 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9347 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9348 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9349 drm_dbg_kms(&i915->drm,
9350 "Plane size/co-ordinates cannot be changed in async flip\n");
9354 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9355 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9359 if (old_plane_state->hw.pixel_blend_mode !=
9360 new_plane_state->hw.pixel_blend_mode) {
9361 drm_dbg_kms(&i915->drm,
9362 "Pixel blend mode cannot be changed in async flip\n");
9366 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9367 drm_dbg_kms(&i915->drm,
9368 "Color encoding cannot be changed in async flip\n");
9372 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9373 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9381 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
9383 struct intel_crtc_state *crtc_state;
9384 struct intel_crtc *crtc;
9387 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9388 struct intel_crtc_state *linked_crtc_state;
9389 struct intel_crtc *linked_crtc;
9392 if (!crtc_state->bigjoiner)
9395 linked_crtc = crtc_state->bigjoiner_linked_crtc;
9396 linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
9397 if (IS_ERR(linked_crtc_state))
9398 return PTR_ERR(linked_crtc_state);
9400 if (!intel_crtc_needs_modeset(crtc_state))
9403 linked_crtc_state->uapi.mode_changed = true;
9405 ret = drm_atomic_add_affected_connectors(&state->base,
9406 &linked_crtc->base);
9410 ret = intel_atomic_add_affected_planes(state, linked_crtc);
9415 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9416 /* Kill old bigjoiner link, we may re-establish afterwards */
9417 if (intel_crtc_needs_modeset(crtc_state) &&
9418 crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
9419 kill_bigjoiner_slave(state, crtc_state);
9426 * intel_atomic_check - validate state object
9428 * @_state: state to validate
9430 static int intel_atomic_check(struct drm_device *dev,
9431 struct drm_atomic_state *_state)
9433 struct drm_i915_private *dev_priv = to_i915(dev);
9434 struct intel_atomic_state *state = to_intel_atomic_state(_state);
9435 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9436 struct intel_crtc *crtc;
9438 bool any_ms = false;
9440 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9441 new_crtc_state, i) {
9442 if (new_crtc_state->inherited != old_crtc_state->inherited)
9443 new_crtc_state->uapi.mode_changed = true;
9446 intel_vrr_check_modeset(state);
9448 ret = drm_atomic_helper_check_modeset(dev, &state->base);
9452 ret = intel_bigjoiner_add_affected_crtcs(state);
9456 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9457 new_crtc_state, i) {
9458 if (!intel_crtc_needs_modeset(new_crtc_state)) {
9460 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9465 if (!new_crtc_state->uapi.enable) {
9466 if (!new_crtc_state->bigjoiner_slave) {
9467 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9473 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9477 ret = intel_modeset_pipe_config(state, new_crtc_state);
9481 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9487 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9488 new_crtc_state, i) {
9489 if (!intel_crtc_needs_modeset(new_crtc_state))
9492 ret = intel_modeset_pipe_config_late(new_crtc_state);
9496 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9500 * Check if fastset is allowed by external dependencies like other
9501 * pipes and transcoders.
9503 * Right now it only forces a fullmodeset when the MST master
9504 * transcoder did not changed but the pipe of the master transcoder
9505 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
9506 * in case of port synced crtcs, if one of the synced crtcs
9507 * needs a full modeset, all other synced crtcs should be
9508 * forced a full modeset.
9510 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9511 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9514 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9515 enum transcoder master = new_crtc_state->mst_master_transcoder;
9517 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9518 new_crtc_state->uapi.mode_changed = true;
9519 new_crtc_state->update_pipe = false;
9523 if (is_trans_port_sync_mode(new_crtc_state)) {
9524 u8 trans = new_crtc_state->sync_mode_slaves_mask;
9526 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9527 trans |= BIT(new_crtc_state->master_transcoder);
9529 if (intel_cpu_transcoders_need_modeset(state, trans)) {
9530 new_crtc_state->uapi.mode_changed = true;
9531 new_crtc_state->update_pipe = false;
9535 if (new_crtc_state->bigjoiner) {
9536 struct intel_crtc_state *linked_crtc_state =
9537 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9539 if (intel_crtc_needs_modeset(linked_crtc_state)) {
9540 new_crtc_state->uapi.mode_changed = true;
9541 new_crtc_state->update_pipe = false;
9546 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9547 new_crtc_state, i) {
9548 if (intel_crtc_needs_modeset(new_crtc_state)) {
9553 if (!new_crtc_state->update_pipe)
9556 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9559 if (any_ms && !check_digital_port_conflicts(state)) {
9560 drm_dbg_kms(&dev_priv->drm,
9561 "rejecting conflicting digital port configuration\n");
9566 ret = drm_dp_mst_atomic_check(&state->base);
9570 ret = intel_atomic_check_planes(state);
9574 intel_fbc_choose_crtc(dev_priv, state);
9575 ret = calc_watermark_data(state);
9579 ret = intel_bw_atomic_check(state);
9583 ret = intel_atomic_check_cdclk(state, &any_ms);
9588 ret = intel_modeset_checks(state);
9592 ret = intel_modeset_calc_cdclk(state);
9596 intel_modeset_clear_plls(state);
9599 ret = intel_atomic_check_crtcs(state);
9603 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9604 new_crtc_state, i) {
9605 if (new_crtc_state->uapi.async_flip) {
9606 ret = intel_atomic_check_async(state);
9611 if (!intel_crtc_needs_modeset(new_crtc_state) &&
9612 !new_crtc_state->update_pipe)
9615 intel_dump_pipe_config(new_crtc_state, state,
9616 intel_crtc_needs_modeset(new_crtc_state) ?
9617 "[modeset]" : "[fastset]");
9623 if (ret == -EDEADLK)
9627 * FIXME would probably be nice to know which crtc specifically
9628 * caused the failure, in cases where we can pinpoint it.
9630 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9632 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
9637 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
9639 struct intel_crtc_state *crtc_state;
9640 struct intel_crtc *crtc;
9643 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
9647 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9648 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9650 if (mode_changed || crtc_state->update_pipe ||
9651 crtc_state->uapi.color_mgmt_changed) {
9652 intel_dsb_prepare(crtc_state);
9659 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
9660 struct intel_crtc_state *crtc_state)
9662 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9664 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
9665 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
9667 if (crtc_state->has_pch_encoder) {
9668 enum pipe pch_transcoder =
9669 intel_crtc_pch_transcoder(crtc);
9671 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
9675 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
9676 const struct intel_crtc_state *new_crtc_state)
9678 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
9679 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9682 * Update pipe size and adjust fitter if needed: the reason for this is
9683 * that in compute_mode_changes we check the native mode (not the pfit
9684 * mode) to see if we can flip rather than do a full mode set. In the
9685 * fastboot case, we'll flip, but if we don't update the pipesrc and
9686 * pfit state, we'll end up with a big fb scanned out into the wrong
9689 intel_set_pipe_src_size(new_crtc_state);
9691 /* on skylake this is done by detaching scalers */
9692 if (DISPLAY_VER(dev_priv) >= 9) {
9693 skl_detach_scalers(new_crtc_state);
9695 if (new_crtc_state->pch_pfit.enabled)
9696 skl_pfit_enable(new_crtc_state);
9697 } else if (HAS_PCH_SPLIT(dev_priv)) {
9698 if (new_crtc_state->pch_pfit.enabled)
9699 ilk_pfit_enable(new_crtc_state);
9700 else if (old_crtc_state->pch_pfit.enabled)
9701 ilk_pfit_disable(old_crtc_state);
9705 * The register is supposedly single buffered so perhaps
9706 * not 100% correct to do this here. But SKL+ calculate
9707 * this based on the adjust pixel rate so pfit changes do
9708 * affect it and so it must be updated for fastsets.
9709 * HSW/BDW only really need this here for fastboot, after
9710 * that the value should not change without a full modeset.
9712 if (DISPLAY_VER(dev_priv) >= 9 ||
9713 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
9714 hsw_set_linetime_wm(new_crtc_state);
9716 if (DISPLAY_VER(dev_priv) >= 11)
9717 icl_set_pipe_chicken(crtc);
9720 static void commit_pipe_config(struct intel_atomic_state *state,
9721 struct intel_crtc *crtc)
9723 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9724 const struct intel_crtc_state *old_crtc_state =
9725 intel_atomic_get_old_crtc_state(state, crtc);
9726 const struct intel_crtc_state *new_crtc_state =
9727 intel_atomic_get_new_crtc_state(state, crtc);
9728 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
9731 * During modesets pipe configuration was programmed as the
9735 if (new_crtc_state->uapi.color_mgmt_changed ||
9736 new_crtc_state->update_pipe)
9737 intel_color_commit(new_crtc_state);
9739 if (DISPLAY_VER(dev_priv) >= 9)
9740 skl_detach_scalers(new_crtc_state);
9742 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
9743 bdw_set_pipemisc(new_crtc_state);
9745 if (new_crtc_state->update_pipe)
9746 intel_pipe_fastset(old_crtc_state, new_crtc_state);
9748 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
9751 if (dev_priv->display.atomic_update_watermarks)
9752 dev_priv->display.atomic_update_watermarks(state, crtc);
9755 static void intel_enable_crtc(struct intel_atomic_state *state,
9756 struct intel_crtc *crtc)
9758 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9759 const struct intel_crtc_state *new_crtc_state =
9760 intel_atomic_get_new_crtc_state(state, crtc);
9762 if (!intel_crtc_needs_modeset(new_crtc_state))
9765 intel_crtc_update_active_timings(new_crtc_state);
9767 dev_priv->display.crtc_enable(state, crtc);
9769 if (new_crtc_state->bigjoiner_slave)
9772 /* vblanks work again, re-enable pipe CRC. */
9773 intel_crtc_enable_pipe_crc(crtc);
9776 static void intel_update_crtc(struct intel_atomic_state *state,
9777 struct intel_crtc *crtc)
9779 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9780 const struct intel_crtc_state *old_crtc_state =
9781 intel_atomic_get_old_crtc_state(state, crtc);
9782 struct intel_crtc_state *new_crtc_state =
9783 intel_atomic_get_new_crtc_state(state, crtc);
9784 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
9787 if (new_crtc_state->preload_luts &&
9788 (new_crtc_state->uapi.color_mgmt_changed ||
9789 new_crtc_state->update_pipe))
9790 intel_color_load_luts(new_crtc_state);
9792 intel_pre_plane_update(state, crtc);
9794 if (new_crtc_state->update_pipe)
9795 intel_encoders_update_pipe(state, crtc);
9798 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
9799 intel_fbc_disable(crtc);
9801 intel_fbc_enable(state, crtc);
9803 /* Perform vblank evasion around commit operation */
9804 intel_pipe_update_start(new_crtc_state);
9806 commit_pipe_config(state, crtc);
9808 if (DISPLAY_VER(dev_priv) >= 9)
9809 skl_update_planes_on_crtc(state, crtc);
9811 i9xx_update_planes_on_crtc(state, crtc);
9813 intel_pipe_update_end(new_crtc_state);
9816 * We usually enable FIFO underrun interrupts as part of the
9817 * CRTC enable sequence during modesets. But when we inherit a
9818 * valid pipe configuration from the BIOS we need to take care
9819 * of enabling them on the CRTC's first fastset.
9821 if (new_crtc_state->update_pipe && !modeset &&
9822 old_crtc_state->inherited)
9823 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
9826 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
9827 struct intel_crtc_state *old_crtc_state,
9828 struct intel_crtc_state *new_crtc_state,
9829 struct intel_crtc *crtc)
9831 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9833 drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
9835 intel_crtc_disable_planes(state, crtc);
9838 * We still need special handling for disabling bigjoiner master
9839 * and slaves since for slave we do not have encoder or plls
9840 * so we dont need to disable those.
9842 if (old_crtc_state->bigjoiner) {
9843 intel_crtc_disable_planes(state,
9844 old_crtc_state->bigjoiner_linked_crtc);
9845 old_crtc_state->bigjoiner_linked_crtc->active = false;
9849 * We need to disable pipe CRC before disabling the pipe,
9850 * or we race against vblank off.
9852 intel_crtc_disable_pipe_crc(crtc);
9854 dev_priv->display.crtc_disable(state, crtc);
9855 crtc->active = false;
9856 intel_fbc_disable(crtc);
9857 intel_disable_shared_dpll(old_crtc_state);
9859 /* FIXME unify this for all platforms */
9860 if (!new_crtc_state->hw.active &&
9861 !HAS_GMCH(dev_priv) &&
9862 dev_priv->display.initial_watermarks)
9863 dev_priv->display.initial_watermarks(state, crtc);
9866 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
9868 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
9869 struct intel_crtc *crtc;
9873 /* Only disable port sync and MST slaves */
9874 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9875 new_crtc_state, i) {
9876 if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
9879 if (!old_crtc_state->hw.active)
9882 /* In case of Transcoder port Sync master slave CRTCs can be
9883 * assigned in any order and we need to make sure that
9884 * slave CRTCs are disabled first and then master CRTC since
9885 * Slave vblanks are masked till Master Vblanks.
9887 if (!is_trans_port_sync_slave(old_crtc_state) &&
9888 !intel_dp_mst_is_slave_trans(old_crtc_state))
9891 intel_pre_plane_update(state, crtc);
9892 intel_old_crtc_state_disables(state, old_crtc_state,
9893 new_crtc_state, crtc);
9894 handled |= BIT(crtc->pipe);
9897 /* Disable everything else left on */
9898 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9899 new_crtc_state, i) {
9900 if (!intel_crtc_needs_modeset(new_crtc_state) ||
9901 (handled & BIT(crtc->pipe)) ||
9902 old_crtc_state->bigjoiner_slave)
9905 intel_pre_plane_update(state, crtc);
9906 if (old_crtc_state->bigjoiner) {
9907 struct intel_crtc *slave =
9908 old_crtc_state->bigjoiner_linked_crtc;
9910 intel_pre_plane_update(state, slave);
9913 if (old_crtc_state->hw.active)
9914 intel_old_crtc_state_disables(state, old_crtc_state,
9915 new_crtc_state, crtc);
9919 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
9921 struct intel_crtc_state *new_crtc_state;
9922 struct intel_crtc *crtc;
9925 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9926 if (!new_crtc_state->hw.active)
9929 intel_enable_crtc(state, crtc);
9930 intel_update_crtc(state, crtc);
9934 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
9936 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9937 struct intel_crtc *crtc;
9938 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9939 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
9940 u8 update_pipes = 0, modeset_pipes = 0;
9943 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9944 enum pipe pipe = crtc->pipe;
9946 if (!new_crtc_state->hw.active)
9949 /* ignore allocations for crtc's that have been turned off. */
9950 if (!intel_crtc_needs_modeset(new_crtc_state)) {
9951 entries[pipe] = old_crtc_state->wm.skl.ddb;
9952 update_pipes |= BIT(pipe);
9954 modeset_pipes |= BIT(pipe);
9959 * Whenever the number of active pipes changes, we need to make sure we
9960 * update the pipes in the right order so that their ddb allocations
9961 * never overlap with each other between CRTC updates. Otherwise we'll
9962 * cause pipe underruns and other bad stuff.
9964 * So first lets enable all pipes that do not need a fullmodeset as
9965 * those don't have any external dependency.
9967 while (update_pipes) {
9968 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9969 new_crtc_state, i) {
9970 enum pipe pipe = crtc->pipe;
9972 if ((update_pipes & BIT(pipe)) == 0)
9975 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
9976 entries, I915_MAX_PIPES, pipe))
9979 entries[pipe] = new_crtc_state->wm.skl.ddb;
9980 update_pipes &= ~BIT(pipe);
9982 intel_update_crtc(state, crtc);
9985 * If this is an already active pipe, it's DDB changed,
9986 * and this isn't the last pipe that needs updating
9987 * then we need to wait for a vblank to pass for the
9988 * new ddb allocation to take effect.
9990 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
9991 &old_crtc_state->wm.skl.ddb) &&
9992 (update_pipes | modeset_pipes))
9993 intel_wait_for_vblank(dev_priv, pipe);
9997 update_pipes = modeset_pipes;
10000 * Enable all pipes that needs a modeset and do not depends on other
10003 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10004 enum pipe pipe = crtc->pipe;
10006 if ((modeset_pipes & BIT(pipe)) == 0)
10009 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10010 is_trans_port_sync_master(new_crtc_state) ||
10011 (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10014 modeset_pipes &= ~BIT(pipe);
10016 intel_enable_crtc(state, crtc);
10020 * Then we enable all remaining pipes that depend on other
10021 * pipes: MST slaves and port sync masters, big joiner master
10023 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10024 enum pipe pipe = crtc->pipe;
10026 if ((modeset_pipes & BIT(pipe)) == 0)
10029 modeset_pipes &= ~BIT(pipe);
10031 intel_enable_crtc(state, crtc);
10035 * Finally we do the plane updates/etc. for all pipes that got enabled.
10037 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10038 enum pipe pipe = crtc->pipe;
10040 if ((update_pipes & BIT(pipe)) == 0)
10043 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10044 entries, I915_MAX_PIPES, pipe));
10046 entries[pipe] = new_crtc_state->wm.skl.ddb;
10047 update_pipes &= ~BIT(pipe);
10049 intel_update_crtc(state, crtc);
10052 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10053 drm_WARN_ON(&dev_priv->drm, update_pipes);
10056 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10058 struct intel_atomic_state *state, *next;
10059 struct llist_node *freed;
10061 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10062 llist_for_each_entry_safe(state, next, freed, freed)
10063 drm_atomic_state_put(&state->base);
10066 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10068 struct drm_i915_private *dev_priv =
10069 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10071 intel_atomic_helper_free_state(dev_priv);
10074 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10076 struct wait_queue_entry wait_fence, wait_reset;
10077 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10079 init_wait_entry(&wait_fence, 0);
10080 init_wait_entry(&wait_reset, 0);
10082 prepare_to_wait(&intel_state->commit_ready.wait,
10083 &wait_fence, TASK_UNINTERRUPTIBLE);
10084 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10085 I915_RESET_MODESET),
10086 &wait_reset, TASK_UNINTERRUPTIBLE);
10089 if (i915_sw_fence_done(&intel_state->commit_ready) ||
10090 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10095 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10096 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10097 I915_RESET_MODESET),
10101 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10103 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10104 struct intel_crtc *crtc;
10107 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10109 intel_dsb_cleanup(old_crtc_state);
10112 static void intel_atomic_cleanup_work(struct work_struct *work)
10114 struct intel_atomic_state *state =
10115 container_of(work, struct intel_atomic_state, base.commit_work);
10116 struct drm_i915_private *i915 = to_i915(state->base.dev);
10118 intel_cleanup_dsbs(state);
10119 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10120 drm_atomic_helper_commit_cleanup_done(&state->base);
10121 drm_atomic_state_put(&state->base);
10123 intel_atomic_helper_free_state(i915);
10126 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10128 struct drm_i915_private *i915 = to_i915(state->base.dev);
10129 struct intel_plane *plane;
10130 struct intel_plane_state *plane_state;
10133 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10134 struct drm_framebuffer *fb = plane_state->hw.fb;
10138 fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10142 * The layout of the fast clear color value expected by HW
10143 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
10144 * - 4 x 4 bytes per-channel value
10145 * (in surface type specific float/int format provided by the fb user)
10146 * - 8 bytes native color value used by the display
10147 * (converted/written by GPU during a fast clear operation using the
10148 * above per-channel values)
10150 * The commit's FB prepare hook already ensured that FB obj is pinned and the
10151 * caller made sure that the object is synced wrt. the related color clear value
10154 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10155 fb->offsets[2] + 16,
10156 &plane_state->ccval,
10157 sizeof(plane_state->ccval));
10158 /* The above could only fail if the FB obj has an unexpected backing store type. */
10159 drm_WARN_ON(&i915->drm, ret);
10163 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10165 struct drm_device *dev = state->base.dev;
10166 struct drm_i915_private *dev_priv = to_i915(dev);
10167 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10168 struct intel_crtc *crtc;
10169 u64 put_domains[I915_MAX_PIPES] = {};
10170 intel_wakeref_t wakeref = 0;
10173 intel_atomic_commit_fence_wait(state);
10175 drm_atomic_helper_wait_for_dependencies(&state->base);
10177 if (state->modeset)
10178 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10180 intel_atomic_prepare_plane_clear_colors(state);
10182 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10183 new_crtc_state, i) {
10184 if (intel_crtc_needs_modeset(new_crtc_state) ||
10185 new_crtc_state->update_pipe) {
10187 put_domains[crtc->pipe] =
10188 modeset_get_crtc_power_domains(new_crtc_state);
10192 intel_commit_modeset_disables(state);
10194 /* FIXME: Eventually get rid of our crtc->config pointer */
10195 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10196 crtc->config = new_crtc_state;
10198 if (state->modeset) {
10199 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10201 intel_set_cdclk_pre_plane_update(state);
10203 intel_modeset_verify_disabled(dev_priv, state);
10206 intel_sagv_pre_plane_update(state);
10208 /* Complete the events for pipes that have now been disabled */
10209 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10210 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10212 /* Complete events for now disable pipes here. */
10213 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10214 spin_lock_irq(&dev->event_lock);
10215 drm_crtc_send_vblank_event(&crtc->base,
10216 new_crtc_state->uapi.event);
10217 spin_unlock_irq(&dev->event_lock);
10219 new_crtc_state->uapi.event = NULL;
10223 if (state->modeset)
10224 intel_encoders_update_prepare(state);
10226 intel_dbuf_pre_plane_update(state);
10228 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10229 if (new_crtc_state->uapi.async_flip)
10230 intel_crtc_enable_flip_done(state, crtc);
10233 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
10234 dev_priv->display.commit_modeset_enables(state);
10236 if (state->modeset) {
10237 intel_encoders_update_complete(state);
10239 intel_set_cdclk_post_plane_update(state);
10242 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10243 * already, but still need the state for the delayed optimization. To
10245 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10246 * - schedule that vblank worker _before_ calling hw_done
10247 * - at the start of commit_tail, cancel it _synchrously
10248 * - switch over to the vblank wait helper in the core after that since
10249 * we don't need out special handling any more.
10251 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10253 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10254 if (new_crtc_state->uapi.async_flip)
10255 intel_crtc_disable_flip_done(state, crtc);
10257 if (new_crtc_state->hw.active &&
10258 !intel_crtc_needs_modeset(new_crtc_state) &&
10259 !new_crtc_state->preload_luts &&
10260 (new_crtc_state->uapi.color_mgmt_changed ||
10261 new_crtc_state->update_pipe))
10262 intel_color_load_luts(new_crtc_state);
10266 * Now that the vblank has passed, we can go ahead and program the
10267 * optimal watermarks on platforms that need two-step watermark
10270 * TODO: Move this (and other cleanup) to an async worker eventually.
10272 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10273 new_crtc_state, i) {
10275 * Gen2 reports pipe underruns whenever all planes are disabled.
10276 * So re-enable underrun reporting after some planes get enabled.
10278 * We do this before .optimize_watermarks() so that we have a
10279 * chance of catching underruns with the intermediate watermarks
10280 * vs. the new plane configuration.
10282 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10283 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10285 if (dev_priv->display.optimize_watermarks)
10286 dev_priv->display.optimize_watermarks(state, crtc);
10289 intel_dbuf_post_plane_update(state);
10291 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10292 intel_post_plane_update(state, crtc);
10294 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10296 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10299 * DSB cleanup is done in cleanup_work aligning with framebuffer
10300 * cleanup. So copy and reset the dsb structure to sync with
10301 * commit_done and later do dsb cleanup in cleanup_work.
10303 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10306 /* Underruns don't always raise interrupts, so check manually */
10307 intel_check_cpu_fifo_underruns(dev_priv);
10308 intel_check_pch_fifo_underruns(dev_priv);
10310 if (state->modeset)
10311 intel_verify_planes(state);
10313 intel_sagv_post_plane_update(state);
10315 drm_atomic_helper_commit_hw_done(&state->base);
10317 if (state->modeset) {
10318 /* As one of the primary mmio accessors, KMS has a high
10319 * likelihood of triggering bugs in unclaimed access. After we
10320 * finish modesetting, see if an error has been flagged, and if
10321 * so enable debugging for the next modeset - and hope we catch
10324 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10325 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10327 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10330 * Defer the cleanup of the old state to a separate worker to not
10331 * impede the current task (userspace for blocking modesets) that
10332 * are executed inline. For out-of-line asynchronous modesets/flips,
10333 * deferring to a new worker seems overkill, but we would place a
10334 * schedule point (cond_resched()) here anyway to keep latencies
10337 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10338 queue_work(system_highpri_wq, &state->base.commit_work);
10341 static void intel_atomic_commit_work(struct work_struct *work)
10343 struct intel_atomic_state *state =
10344 container_of(work, struct intel_atomic_state, base.commit_work);
10346 intel_atomic_commit_tail(state);
10349 static int __i915_sw_fence_call
10350 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10351 enum i915_sw_fence_notify notify)
10353 struct intel_atomic_state *state =
10354 container_of(fence, struct intel_atomic_state, commit_ready);
10357 case FENCE_COMPLETE:
10358 /* we do blocking waits in the worker, nothing to do here */
10362 struct intel_atomic_helper *helper =
10363 &to_i915(state->base.dev)->atomic_helper;
10365 if (llist_add(&state->freed, &helper->free_list))
10366 schedule_work(&helper->free_work);
10371 return NOTIFY_DONE;
10374 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10376 struct intel_plane_state *old_plane_state, *new_plane_state;
10377 struct intel_plane *plane;
10380 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10381 new_plane_state, i)
10382 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10383 to_intel_frontbuffer(new_plane_state->hw.fb),
10384 plane->frontbuffer_bit);
10387 static int intel_atomic_commit(struct drm_device *dev,
10388 struct drm_atomic_state *_state,
10391 struct intel_atomic_state *state = to_intel_atomic_state(_state);
10392 struct drm_i915_private *dev_priv = to_i915(dev);
10395 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
10397 drm_atomic_state_get(&state->base);
10398 i915_sw_fence_init(&state->commit_ready,
10399 intel_atomic_commit_ready);
10402 * The intel_legacy_cursor_update() fast path takes care
10403 * of avoiding the vblank waits for simple cursor
10404 * movement and flips. For cursor on/off and size changes,
10405 * we want to perform the vblank waits so that watermark
10406 * updates happen during the correct frames. Gen9+ have
10407 * double buffered watermarks and so shouldn't need this.
10409 * Unset state->legacy_cursor_update before the call to
10410 * drm_atomic_helper_setup_commit() because otherwise
10411 * drm_atomic_helper_wait_for_flip_done() is a noop and
10412 * we get FIFO underruns because we didn't wait
10415 * FIXME doing watermarks and fb cleanup from a vblank worker
10416 * (assuming we had any) would solve these problems.
10418 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
10419 struct intel_crtc_state *new_crtc_state;
10420 struct intel_crtc *crtc;
10423 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10424 if (new_crtc_state->wm.need_postvbl_update ||
10425 new_crtc_state->update_wm_post)
10426 state->base.legacy_cursor_update = false;
10429 ret = intel_atomic_prepare_commit(state);
10431 drm_dbg_atomic(&dev_priv->drm,
10432 "Preparing state failed with %i\n", ret);
10433 i915_sw_fence_commit(&state->commit_ready);
10434 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10438 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
10440 ret = drm_atomic_helper_swap_state(&state->base, true);
10442 intel_atomic_swap_global_state(state);
10445 struct intel_crtc_state *new_crtc_state;
10446 struct intel_crtc *crtc;
10449 i915_sw_fence_commit(&state->commit_ready);
10451 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10452 intel_dsb_cleanup(new_crtc_state);
10454 drm_atomic_helper_cleanup_planes(dev, &state->base);
10455 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10458 intel_shared_dpll_swap_state(state);
10459 intel_atomic_track_fbs(state);
10461 drm_atomic_state_get(&state->base);
10462 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
10464 i915_sw_fence_commit(&state->commit_ready);
10465 if (nonblock && state->modeset) {
10466 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
10467 } else if (nonblock) {
10468 queue_work(dev_priv->flip_wq, &state->base.commit_work);
10470 if (state->modeset)
10471 flush_workqueue(dev_priv->modeset_wq);
10472 intel_atomic_commit_tail(state);
10478 struct wait_rps_boost {
10479 struct wait_queue_entry wait;
10481 struct drm_crtc *crtc;
10482 struct i915_request *request;
10485 static int do_rps_boost(struct wait_queue_entry *_wait,
10486 unsigned mode, int sync, void *key)
10488 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10489 struct i915_request *rq = wait->request;
10492 * If we missed the vblank, but the request is already running it
10493 * is reasonable to assume that it will complete before the next
10494 * vblank without our intervention, so leave RPS alone.
10496 if (!i915_request_started(rq))
10497 intel_rps_boost(rq);
10498 i915_request_put(rq);
10500 drm_crtc_vblank_put(wait->crtc);
10502 list_del(&wait->wait.entry);
10507 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10508 struct dma_fence *fence)
10510 struct wait_rps_boost *wait;
10512 if (!dma_fence_is_i915(fence))
10515 if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10518 if (drm_crtc_vblank_get(crtc))
10521 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10523 drm_crtc_vblank_put(crtc);
10527 wait->request = to_request(dma_fence_get(fence));
10530 wait->wait.func = do_rps_boost;
10531 wait->wait.flags = 0;
10533 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10536 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10538 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10539 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10540 struct drm_framebuffer *fb = plane_state->hw.fb;
10541 struct i915_vma *vma;
10543 plane->id == PLANE_CURSOR &&
10544 INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10546 vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10547 &plane_state->view.gtt,
10548 intel_plane_uses_fence(plane_state),
10549 &plane_state->flags);
10551 return PTR_ERR(vma);
10553 plane_state->vma = vma;
10558 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10560 struct i915_vma *vma;
10562 vma = fetch_and_zero(&old_plane_state->vma);
10564 intel_unpin_fb_vma(vma, old_plane_state->flags);
10568 * intel_prepare_plane_fb - Prepare fb for usage on plane
10569 * @_plane: drm plane to prepare for
10570 * @_new_plane_state: the plane state being prepared
10572 * Prepares a framebuffer for usage on a display plane. Generally this
10573 * involves pinning the underlying object and updating the frontbuffer tracking
10574 * bits. Some older platforms need special physical address handling for
10577 * Returns 0 on success, negative error code on failure.
10580 intel_prepare_plane_fb(struct drm_plane *_plane,
10581 struct drm_plane_state *_new_plane_state)
10583 struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
10584 struct intel_plane *plane = to_intel_plane(_plane);
10585 struct intel_plane_state *new_plane_state =
10586 to_intel_plane_state(_new_plane_state);
10587 struct intel_atomic_state *state =
10588 to_intel_atomic_state(new_plane_state->uapi.state);
10589 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10590 const struct intel_plane_state *old_plane_state =
10591 intel_atomic_get_old_plane_state(state, plane);
10592 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
10593 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
10597 const struct intel_crtc_state *crtc_state =
10598 intel_atomic_get_new_crtc_state(state,
10599 to_intel_crtc(old_plane_state->hw.crtc));
10601 /* Big Hammer, we also need to ensure that any pending
10602 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
10603 * current scanout is retired before unpinning the old
10604 * framebuffer. Note that we rely on userspace rendering
10605 * into the buffer attached to the pipe they are waiting
10606 * on. If not, userspace generates a GPU hang with IPEHR
10607 * point to the MI_WAIT_FOR_EVENT.
10609 * This should only fail upon a hung GPU, in which case we
10610 * can safely continue.
10612 if (intel_crtc_needs_modeset(crtc_state)) {
10613 ret = i915_sw_fence_await_reservation(&state->commit_ready,
10614 old_obj->base.resv, NULL,
10622 if (new_plane_state->uapi.fence) { /* explicit fencing */
10623 i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
10625 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
10626 new_plane_state->uapi.fence,
10627 i915_fence_timeout(dev_priv),
10637 ret = intel_plane_pin_fb(new_plane_state);
10641 i915_gem_object_wait_priority(obj, 0, &attr);
10642 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
10644 if (!new_plane_state->uapi.fence) { /* implicit fencing */
10645 struct dma_fence *fence;
10647 ret = i915_sw_fence_await_reservation(&state->commit_ready,
10648 obj->base.resv, NULL,
10650 i915_fence_timeout(dev_priv),
10655 fence = dma_resv_get_excl_rcu(obj->base.resv);
10657 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10659 dma_fence_put(fence);
10662 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10663 new_plane_state->uapi.fence);
10667 * We declare pageflips to be interactive and so merit a small bias
10668 * towards upclocking to deliver the frame on time. By only changing
10669 * the RPS thresholds to sample more regularly and aim for higher
10670 * clocks we can hopefully deliver low power workloads (like kodi)
10671 * that are not quite steady state without resorting to forcing
10672 * maximum clocks following a vblank miss (see do_rps_boost()).
10674 if (!state->rps_interactive) {
10675 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
10676 state->rps_interactive = true;
10682 intel_plane_unpin_fb(new_plane_state);
10688 * intel_cleanup_plane_fb - Cleans up an fb after plane use
10689 * @plane: drm plane to clean up for
10690 * @_old_plane_state: the state from the previous modeset
10692 * Cleans up a framebuffer that has just been removed from a plane.
10695 intel_cleanup_plane_fb(struct drm_plane *plane,
10696 struct drm_plane_state *_old_plane_state)
10698 struct intel_plane_state *old_plane_state =
10699 to_intel_plane_state(_old_plane_state);
10700 struct intel_atomic_state *state =
10701 to_intel_atomic_state(old_plane_state->uapi.state);
10702 struct drm_i915_private *dev_priv = to_i915(plane->dev);
10703 struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
10708 if (state->rps_interactive) {
10709 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
10710 state->rps_interactive = false;
10713 /* Should only be called after a successful intel_prepare_plane_fb()! */
10714 intel_plane_unpin_fb(old_plane_state);
10718 * intel_plane_destroy - destroy a plane
10719 * @plane: plane to destroy
10721 * Common destruction function for all types of planes (primary, cursor,
10724 void intel_plane_destroy(struct drm_plane *plane)
10726 drm_plane_cleanup(plane);
10727 kfree(to_intel_plane(plane));
10730 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
10732 struct intel_plane *plane;
10734 for_each_intel_plane(&dev_priv->drm, plane) {
10735 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
10738 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
10743 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
10744 struct drm_file *file)
10746 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10747 struct drm_crtc *drmmode_crtc;
10748 struct intel_crtc *crtc;
10750 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
10754 crtc = to_intel_crtc(drmmode_crtc);
10755 pipe_from_crtc_id->pipe = crtc->pipe;
10760 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
10762 struct drm_device *dev = encoder->base.dev;
10763 struct intel_encoder *source_encoder;
10764 u32 possible_clones = 0;
10766 for_each_intel_encoder(dev, source_encoder) {
10767 if (encoders_cloneable(encoder, source_encoder))
10768 possible_clones |= drm_encoder_mask(&source_encoder->base);
10771 return possible_clones;
10774 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
10776 struct drm_device *dev = encoder->base.dev;
10777 struct intel_crtc *crtc;
10778 u32 possible_crtcs = 0;
10780 for_each_intel_crtc(dev, crtc) {
10781 if (encoder->pipe_mask & BIT(crtc->pipe))
10782 possible_crtcs |= drm_crtc_mask(&crtc->base);
10785 return possible_crtcs;
10788 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
10790 if (!IS_MOBILE(dev_priv))
10793 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
10796 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
10802 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
10804 if (DISPLAY_VER(dev_priv) >= 9)
10807 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
10810 if (HAS_PCH_LPT_H(dev_priv) &&
10811 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
10814 /* DDI E can't be used if DDI A requires 4 lanes */
10815 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
10818 if (!dev_priv->vbt.int_crt_support)
10824 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
10826 struct intel_encoder *encoder;
10827 bool dpd_is_edp = false;
10829 intel_pps_unlock_regs_wa(dev_priv);
10831 if (!HAS_DISPLAY(dev_priv))
10834 if (IS_ALDERLAKE_S(dev_priv)) {
10835 intel_ddi_init(dev_priv, PORT_A);
10836 intel_ddi_init(dev_priv, PORT_TC1);
10837 intel_ddi_init(dev_priv, PORT_TC2);
10838 intel_ddi_init(dev_priv, PORT_TC3);
10839 intel_ddi_init(dev_priv, PORT_TC4);
10840 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
10841 intel_ddi_init(dev_priv, PORT_A);
10842 intel_ddi_init(dev_priv, PORT_B);
10843 intel_ddi_init(dev_priv, PORT_TC1);
10844 intel_ddi_init(dev_priv, PORT_TC2);
10845 } else if (DISPLAY_VER(dev_priv) >= 12) {
10846 intel_ddi_init(dev_priv, PORT_A);
10847 intel_ddi_init(dev_priv, PORT_B);
10848 intel_ddi_init(dev_priv, PORT_TC1);
10849 intel_ddi_init(dev_priv, PORT_TC2);
10850 intel_ddi_init(dev_priv, PORT_TC3);
10851 intel_ddi_init(dev_priv, PORT_TC4);
10852 intel_ddi_init(dev_priv, PORT_TC5);
10853 intel_ddi_init(dev_priv, PORT_TC6);
10854 icl_dsi_init(dev_priv);
10855 } else if (IS_JSL_EHL(dev_priv)) {
10856 intel_ddi_init(dev_priv, PORT_A);
10857 intel_ddi_init(dev_priv, PORT_B);
10858 intel_ddi_init(dev_priv, PORT_C);
10859 intel_ddi_init(dev_priv, PORT_D);
10860 icl_dsi_init(dev_priv);
10861 } else if (DISPLAY_VER(dev_priv) == 11) {
10862 intel_ddi_init(dev_priv, PORT_A);
10863 intel_ddi_init(dev_priv, PORT_B);
10864 intel_ddi_init(dev_priv, PORT_C);
10865 intel_ddi_init(dev_priv, PORT_D);
10866 intel_ddi_init(dev_priv, PORT_E);
10868 * On some ICL SKUs port F is not present. No strap bits for
10869 * this, so rely on VBT.
10870 * Work around broken VBTs on SKUs known to have no port F.
10872 if (IS_ICL_WITH_PORT_F(dev_priv) &&
10873 intel_bios_is_port_present(dev_priv, PORT_F))
10874 intel_ddi_init(dev_priv, PORT_F);
10876 icl_dsi_init(dev_priv);
10877 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
10879 * FIXME: Broxton doesn't support port detection via the
10880 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
10881 * detect the ports.
10883 intel_ddi_init(dev_priv, PORT_A);
10884 intel_ddi_init(dev_priv, PORT_B);
10885 intel_ddi_init(dev_priv, PORT_C);
10887 vlv_dsi_init(dev_priv);
10888 } else if (HAS_DDI(dev_priv)) {
10891 if (intel_ddi_crt_present(dev_priv))
10892 intel_crt_init(dev_priv);
10895 * Haswell uses DDI functions to detect digital outputs.
10896 * On SKL pre-D0 the strap isn't connected. Later SKUs may or
10897 * may not have it - it was supposed to be fixed by the same
10898 * time we stopped using straps. Assume it's there.
10900 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
10901 /* WaIgnoreDDIAStrap: skl */
10902 if (found || DISPLAY_VER(dev_priv) == 9)
10903 intel_ddi_init(dev_priv, PORT_A);
10905 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
10907 if (HAS_PCH_TGP(dev_priv)) {
10908 /* W/A due to lack of STRAP config on TGP PCH*/
10909 found = (SFUSE_STRAP_DDIB_DETECTED |
10910 SFUSE_STRAP_DDIC_DETECTED |
10911 SFUSE_STRAP_DDID_DETECTED);
10913 found = intel_de_read(dev_priv, SFUSE_STRAP);
10916 if (found & SFUSE_STRAP_DDIB_DETECTED)
10917 intel_ddi_init(dev_priv, PORT_B);
10918 if (found & SFUSE_STRAP_DDIC_DETECTED)
10919 intel_ddi_init(dev_priv, PORT_C);
10920 if (found & SFUSE_STRAP_DDID_DETECTED)
10921 intel_ddi_init(dev_priv, PORT_D);
10922 if (found & SFUSE_STRAP_DDIF_DETECTED)
10923 intel_ddi_init(dev_priv, PORT_F);
10925 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
10927 if (DISPLAY_VER(dev_priv) == 9 &&
10928 intel_bios_is_port_present(dev_priv, PORT_E))
10929 intel_ddi_init(dev_priv, PORT_E);
10931 } else if (HAS_PCH_SPLIT(dev_priv)) {
10935 * intel_edp_init_connector() depends on this completing first,
10936 * to prevent the registration of both eDP and LVDS and the
10937 * incorrect sharing of the PPS.
10939 intel_lvds_init(dev_priv);
10940 intel_crt_init(dev_priv);
10942 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
10944 if (ilk_has_edp_a(dev_priv))
10945 g4x_dp_init(dev_priv, DP_A, PORT_A);
10947 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
10948 /* PCH SDVOB multiplex with HDMIB */
10949 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
10951 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
10952 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
10953 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
10956 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
10957 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
10959 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
10960 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
10962 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
10963 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
10965 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
10966 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
10967 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
10968 bool has_edp, has_port;
10970 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
10971 intel_crt_init(dev_priv);
10974 * The DP_DETECTED bit is the latched state of the DDC
10975 * SDA pin at boot. However since eDP doesn't require DDC
10976 * (no way to plug in a DP->HDMI dongle) the DDC pins for
10977 * eDP ports may have been muxed to an alternate function.
10978 * Thus we can't rely on the DP_DETECTED bit alone to detect
10979 * eDP ports. Consult the VBT as well as DP_DETECTED to
10980 * detect eDP ports.
10982 * Sadly the straps seem to be missing sometimes even for HDMI
10983 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
10984 * and VBT for the presence of the port. Additionally we can't
10985 * trust the port type the VBT declares as we've seen at least
10986 * HDMI ports that the VBT claim are DP or eDP.
10988 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
10989 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
10990 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
10991 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
10992 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
10993 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
10995 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
10996 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
10997 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
10998 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
10999 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11000 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11002 if (IS_CHERRYVIEW(dev_priv)) {
11004 * eDP not supported on port D,
11005 * so no need to worry about it
11007 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11008 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11009 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11010 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11011 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11014 vlv_dsi_init(dev_priv);
11015 } else if (IS_PINEVIEW(dev_priv)) {
11016 intel_lvds_init(dev_priv);
11017 intel_crt_init(dev_priv);
11018 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11019 bool found = false;
11021 if (IS_MOBILE(dev_priv))
11022 intel_lvds_init(dev_priv);
11024 intel_crt_init(dev_priv);
11026 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11027 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11028 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11029 if (!found && IS_G4X(dev_priv)) {
11030 drm_dbg_kms(&dev_priv->drm,
11031 "probing HDMI on SDVOB\n");
11032 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11035 if (!found && IS_G4X(dev_priv))
11036 g4x_dp_init(dev_priv, DP_B, PORT_B);
11039 /* Before G4X SDVOC doesn't have its own detect register */
11041 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11042 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11043 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11046 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11048 if (IS_G4X(dev_priv)) {
11049 drm_dbg_kms(&dev_priv->drm,
11050 "probing HDMI on SDVOC\n");
11051 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11053 if (IS_G4X(dev_priv))
11054 g4x_dp_init(dev_priv, DP_C, PORT_C);
11057 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11058 g4x_dp_init(dev_priv, DP_D, PORT_D);
11060 if (SUPPORTS_TV(dev_priv))
11061 intel_tv_init(dev_priv);
11062 } else if (DISPLAY_VER(dev_priv) == 2) {
11063 if (IS_I85X(dev_priv))
11064 intel_lvds_init(dev_priv);
11066 intel_crt_init(dev_priv);
11067 intel_dvo_init(dev_priv);
11070 for_each_intel_encoder(&dev_priv->drm, encoder) {
11071 encoder->base.possible_crtcs =
11072 intel_encoder_possible_crtcs(encoder);
11073 encoder->base.possible_clones =
11074 intel_encoder_possible_clones(encoder);
11077 intel_init_pch_refclk(dev_priv);
11079 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11082 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11084 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11086 drm_framebuffer_cleanup(fb);
11087 intel_frontbuffer_put(intel_fb->frontbuffer);
11092 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11093 struct drm_file *file,
11094 unsigned int *handle)
11096 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11097 struct drm_i915_private *i915 = to_i915(obj->base.dev);
11099 if (i915_gem_object_is_userptr(obj)) {
11100 drm_dbg(&i915->drm,
11101 "attempting to use a userptr for a framebuffer, denied\n");
11105 return drm_gem_handle_create(file, &obj->base, handle);
11108 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11109 struct drm_file *file,
11110 unsigned flags, unsigned color,
11111 struct drm_clip_rect *clips,
11112 unsigned num_clips)
11114 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11116 i915_gem_object_flush_if_display(obj);
11117 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11122 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11123 .destroy = intel_user_framebuffer_destroy,
11124 .create_handle = intel_user_framebuffer_create_handle,
11125 .dirty = intel_user_framebuffer_dirty,
11128 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
11129 struct drm_i915_gem_object *obj,
11130 struct drm_mode_fb_cmd2 *mode_cmd)
11132 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
11133 struct drm_framebuffer *fb = &intel_fb->base;
11135 unsigned int tiling, stride;
11139 intel_fb->frontbuffer = intel_frontbuffer_get(obj);
11140 if (!intel_fb->frontbuffer)
11143 i915_gem_object_lock(obj, NULL);
11144 tiling = i915_gem_object_get_tiling(obj);
11145 stride = i915_gem_object_get_stride(obj);
11146 i915_gem_object_unlock(obj);
11148 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
11150 * If there's a fence, enforce that
11151 * the fb modifier and tiling mode match.
11153 if (tiling != I915_TILING_NONE &&
11154 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11155 drm_dbg_kms(&dev_priv->drm,
11156 "tiling_mode doesn't match fb modifier\n");
11160 if (tiling == I915_TILING_X) {
11161 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
11162 } else if (tiling == I915_TILING_Y) {
11163 drm_dbg_kms(&dev_priv->drm,
11164 "No Y tiling for legacy addfb\n");
11169 if (!drm_any_plane_has_format(&dev_priv->drm,
11170 mode_cmd->pixel_format,
11171 mode_cmd->modifier[0])) {
11172 drm_dbg_kms(&dev_priv->drm,
11173 "unsupported pixel format %p4cc / modifier 0x%llx\n",
11174 &mode_cmd->pixel_format, mode_cmd->modifier[0]);
11179 * gen2/3 display engine uses the fence if present,
11180 * so the tiling mode must match the fb modifier exactly.
11182 if (DISPLAY_VER(dev_priv) < 4 &&
11183 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11184 drm_dbg_kms(&dev_priv->drm,
11185 "tiling_mode must match fb modifier exactly on gen2/3\n");
11189 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
11190 mode_cmd->modifier[0]);
11191 if (mode_cmd->pitches[0] > max_stride) {
11192 drm_dbg_kms(&dev_priv->drm,
11193 "%s pitch (%u) must be at most %d\n",
11194 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
11195 "tiled" : "linear",
11196 mode_cmd->pitches[0], max_stride);
11201 * If there's a fence, enforce that
11202 * the fb pitch and fence stride match.
11204 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
11205 drm_dbg_kms(&dev_priv->drm,
11206 "pitch (%d) must match tiling stride (%d)\n",
11207 mode_cmd->pitches[0], stride);
11211 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11212 if (mode_cmd->offsets[0] != 0) {
11213 drm_dbg_kms(&dev_priv->drm,
11214 "plane 0 offset (0x%08x) must be 0\n",
11215 mode_cmd->offsets[0]);
11219 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
11221 for (i = 0; i < fb->format->num_planes; i++) {
11222 u32 stride_alignment;
11224 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
11225 drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
11230 stride_alignment = intel_fb_stride_alignment(fb, i);
11231 if (fb->pitches[i] & (stride_alignment - 1)) {
11232 drm_dbg_kms(&dev_priv->drm,
11233 "plane %d pitch (%d) must be at least %u byte aligned\n",
11234 i, fb->pitches[i], stride_alignment);
11238 if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
11239 int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
11241 if (fb->pitches[i] != ccs_aux_stride) {
11242 drm_dbg_kms(&dev_priv->drm,
11243 "ccs aux plane %d pitch (%d) must be %d\n",
11245 fb->pitches[i], ccs_aux_stride);
11250 fb->obj[i] = &obj->base;
11253 ret = intel_fill_fb_info(dev_priv, fb);
11257 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
11259 drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
11266 intel_frontbuffer_put(intel_fb->frontbuffer);
11270 static struct drm_framebuffer *
11271 intel_user_framebuffer_create(struct drm_device *dev,
11272 struct drm_file *filp,
11273 const struct drm_mode_fb_cmd2 *user_mode_cmd)
11275 struct drm_framebuffer *fb;
11276 struct drm_i915_gem_object *obj;
11277 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
11278 struct drm_i915_private *i915;
11280 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
11282 return ERR_PTR(-ENOENT);
11284 /* object is backed with LMEM for discrete */
11285 i915 = to_i915(obj->base.dev);
11286 if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
11287 /* object is "remote", not in local memory */
11288 i915_gem_object_put(obj);
11289 return ERR_PTR(-EREMOTE);
11292 fb = intel_framebuffer_create(obj, &mode_cmd);
11293 i915_gem_object_put(obj);
11298 static enum drm_mode_status
11299 intel_mode_valid(struct drm_device *dev,
11300 const struct drm_display_mode *mode)
11302 struct drm_i915_private *dev_priv = to_i915(dev);
11303 int hdisplay_max, htotal_max;
11304 int vdisplay_max, vtotal_max;
11307 * Can't reject DBLSCAN here because Xorg ddxen can add piles
11308 * of DBLSCAN modes to the output's mode list when they detect
11309 * the scaling mode property on the connector. And they don't
11310 * ask the kernel to validate those modes in any way until
11311 * modeset time at which point the client gets a protocol error.
11312 * So in order to not upset those clients we silently ignore the
11313 * DBLSCAN flag on such connectors. For other connectors we will
11314 * reject modes with the DBLSCAN flag in encoder->compute_config().
11315 * And we always reject DBLSCAN modes in connector->mode_valid()
11316 * as we never want such modes on the connector's mode list.
11319 if (mode->vscan > 1)
11320 return MODE_NO_VSCAN;
11322 if (mode->flags & DRM_MODE_FLAG_HSKEW)
11323 return MODE_H_ILLEGAL;
11325 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11326 DRM_MODE_FLAG_NCSYNC |
11327 DRM_MODE_FLAG_PCSYNC))
11330 if (mode->flags & (DRM_MODE_FLAG_BCAST |
11331 DRM_MODE_FLAG_PIXMUX |
11332 DRM_MODE_FLAG_CLKDIV2))
11335 /* Transcoder timing limits */
11336 if (DISPLAY_VER(dev_priv) >= 11) {
11337 hdisplay_max = 16384;
11338 vdisplay_max = 8192;
11339 htotal_max = 16384;
11341 } else if (DISPLAY_VER(dev_priv) >= 9 ||
11342 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11343 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11344 vdisplay_max = 4096;
11347 } else if (DISPLAY_VER(dev_priv) >= 3) {
11348 hdisplay_max = 4096;
11349 vdisplay_max = 4096;
11353 hdisplay_max = 2048;
11354 vdisplay_max = 2048;
11359 if (mode->hdisplay > hdisplay_max ||
11360 mode->hsync_start > htotal_max ||
11361 mode->hsync_end > htotal_max ||
11362 mode->htotal > htotal_max)
11363 return MODE_H_ILLEGAL;
11365 if (mode->vdisplay > vdisplay_max ||
11366 mode->vsync_start > vtotal_max ||
11367 mode->vsync_end > vtotal_max ||
11368 mode->vtotal > vtotal_max)
11369 return MODE_V_ILLEGAL;
11371 if (DISPLAY_VER(dev_priv) >= 5) {
11372 if (mode->hdisplay < 64 ||
11373 mode->htotal - mode->hdisplay < 32)
11374 return MODE_H_ILLEGAL;
11376 if (mode->vtotal - mode->vdisplay < 5)
11377 return MODE_V_ILLEGAL;
11379 if (mode->htotal - mode->hdisplay < 32)
11380 return MODE_H_ILLEGAL;
11382 if (mode->vtotal - mode->vdisplay < 3)
11383 return MODE_V_ILLEGAL;
11389 enum drm_mode_status
11390 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11391 const struct drm_display_mode *mode,
11394 int plane_width_max, plane_height_max;
11397 * intel_mode_valid() should be
11398 * sufficient on older platforms.
11400 if (DISPLAY_VER(dev_priv) < 9)
11404 * Most people will probably want a fullscreen
11405 * plane so let's not advertize modes that are
11406 * too big for that.
11408 if (DISPLAY_VER(dev_priv) >= 11) {
11409 plane_width_max = 5120 << bigjoiner;
11410 plane_height_max = 4320;
11412 plane_width_max = 5120;
11413 plane_height_max = 4096;
11416 if (mode->hdisplay > plane_width_max)
11417 return MODE_H_ILLEGAL;
11419 if (mode->vdisplay > plane_height_max)
11420 return MODE_V_ILLEGAL;
11425 static const struct drm_mode_config_funcs intel_mode_funcs = {
11426 .fb_create = intel_user_framebuffer_create,
11427 .get_format_info = intel_get_format_info,
11428 .output_poll_changed = intel_fbdev_output_poll_changed,
11429 .mode_valid = intel_mode_valid,
11430 .atomic_check = intel_atomic_check,
11431 .atomic_commit = intel_atomic_commit,
11432 .atomic_state_alloc = intel_atomic_state_alloc,
11433 .atomic_state_clear = intel_atomic_state_clear,
11434 .atomic_state_free = intel_atomic_state_free,
11438 * intel_init_display_hooks - initialize the display modesetting hooks
11439 * @dev_priv: device private
11441 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11443 intel_init_cdclk_hooks(dev_priv);
11444 intel_init_audio_hooks(dev_priv);
11446 intel_dpll_init_clock_hook(dev_priv);
11448 if (DISPLAY_VER(dev_priv) >= 9) {
11449 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11450 dev_priv->display.crtc_enable = hsw_crtc_enable;
11451 dev_priv->display.crtc_disable = hsw_crtc_disable;
11452 } else if (HAS_DDI(dev_priv)) {
11453 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11454 dev_priv->display.crtc_enable = hsw_crtc_enable;
11455 dev_priv->display.crtc_disable = hsw_crtc_disable;
11456 } else if (HAS_PCH_SPLIT(dev_priv)) {
11457 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11458 dev_priv->display.crtc_enable = ilk_crtc_enable;
11459 dev_priv->display.crtc_disable = ilk_crtc_disable;
11460 } else if (IS_CHERRYVIEW(dev_priv) ||
11461 IS_VALLEYVIEW(dev_priv)) {
11462 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11463 dev_priv->display.crtc_enable = valleyview_crtc_enable;
11464 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11466 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11467 dev_priv->display.crtc_enable = i9xx_crtc_enable;
11468 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11471 intel_fdi_init_hook(dev_priv);
11473 if (DISPLAY_VER(dev_priv) >= 9) {
11474 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11475 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11477 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11478 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11483 void intel_modeset_init_hw(struct drm_i915_private *i915)
11485 struct intel_cdclk_state *cdclk_state =
11486 to_intel_cdclk_state(i915->cdclk.obj.state);
11488 intel_update_cdclk(i915);
11489 intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11490 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11493 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11495 struct drm_plane *plane;
11496 struct intel_crtc *crtc;
11498 for_each_intel_crtc(state->dev, crtc) {
11499 struct intel_crtc_state *crtc_state;
11501 crtc_state = intel_atomic_get_crtc_state(state, crtc);
11502 if (IS_ERR(crtc_state))
11503 return PTR_ERR(crtc_state);
11505 if (crtc_state->hw.active) {
11507 * Preserve the inherited flag to avoid
11508 * taking the full modeset path.
11510 crtc_state->inherited = true;
11514 drm_for_each_plane(plane, state->dev) {
11515 struct drm_plane_state *plane_state;
11517 plane_state = drm_atomic_get_plane_state(state, plane);
11518 if (IS_ERR(plane_state))
11519 return PTR_ERR(plane_state);
11526 * Calculate what we think the watermarks should be for the state we've read
11527 * out of the hardware and then immediately program those watermarks so that
11528 * we ensure the hardware settings match our internal state.
11530 * We can calculate what we think WM's should be by creating a duplicate of the
11531 * current state (which was constructed during hardware readout) and running it
11532 * through the atomic check code to calculate new watermark values in the
11535 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
11537 struct drm_atomic_state *state;
11538 struct intel_atomic_state *intel_state;
11539 struct intel_crtc *crtc;
11540 struct intel_crtc_state *crtc_state;
11541 struct drm_modeset_acquire_ctx ctx;
11545 /* Only supported on platforms that use atomic watermark design */
11546 if (!dev_priv->display.optimize_watermarks)
11549 state = drm_atomic_state_alloc(&dev_priv->drm);
11550 if (drm_WARN_ON(&dev_priv->drm, !state))
11553 intel_state = to_intel_atomic_state(state);
11555 drm_modeset_acquire_init(&ctx, 0);
11558 state->acquire_ctx = &ctx;
11561 * Hardware readout is the only time we don't want to calculate
11562 * intermediate watermarks (since we don't trust the current
11565 if (!HAS_GMCH(dev_priv))
11566 intel_state->skip_intermediate_wm = true;
11568 ret = sanitize_watermarks_add_affected(state);
11572 ret = intel_atomic_check(&dev_priv->drm, state);
11576 /* Write calculated watermark values back */
11577 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
11578 crtc_state->wm.need_postvbl_update = true;
11579 dev_priv->display.optimize_watermarks(intel_state, crtc);
11581 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
11585 if (ret == -EDEADLK) {
11586 drm_atomic_state_clear(state);
11587 drm_modeset_backoff(&ctx);
11592 * If we fail here, it means that the hardware appears to be
11593 * programmed in a way that shouldn't be possible, given our
11594 * understanding of watermark requirements. This might mean a
11595 * mistake in the hardware readout code or a mistake in the
11596 * watermark calculations for a given platform. Raise a WARN
11597 * so that this is noticeable.
11599 * If this actually happens, we'll have to just leave the
11600 * BIOS-programmed watermarks untouched and hope for the best.
11602 drm_WARN(&dev_priv->drm, ret,
11603 "Could not determine valid watermarks for inherited state\n");
11605 drm_atomic_state_put(state);
11607 drm_modeset_drop_locks(&ctx);
11608 drm_modeset_acquire_fini(&ctx);
11611 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
11613 if (IS_IRONLAKE(dev_priv)) {
11615 intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
11617 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
11618 } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
11619 dev_priv->fdi_pll_freq = 270000;
11624 drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
11627 static int intel_initial_commit(struct drm_device *dev)
11629 struct drm_atomic_state *state = NULL;
11630 struct drm_modeset_acquire_ctx ctx;
11631 struct intel_crtc *crtc;
11634 state = drm_atomic_state_alloc(dev);
11638 drm_modeset_acquire_init(&ctx, 0);
11641 state->acquire_ctx = &ctx;
11643 for_each_intel_crtc(dev, crtc) {
11644 struct intel_crtc_state *crtc_state =
11645 intel_atomic_get_crtc_state(state, crtc);
11647 if (IS_ERR(crtc_state)) {
11648 ret = PTR_ERR(crtc_state);
11652 if (crtc_state->hw.active) {
11653 struct intel_encoder *encoder;
11656 * We've not yet detected sink capabilities
11657 * (audio,infoframes,etc.) and thus we don't want to
11658 * force a full state recomputation yet. We want that to
11659 * happen only for the first real commit from userspace.
11660 * So preserve the inherited flag for the time being.
11662 crtc_state->inherited = true;
11664 ret = drm_atomic_add_affected_planes(state, &crtc->base);
11669 * FIXME hack to force a LUT update to avoid the
11670 * plane update forcing the pipe gamma on without
11671 * having a proper LUT loaded. Remove once we
11672 * have readout for pipe gamma enable.
11674 crtc_state->uapi.color_mgmt_changed = true;
11676 for_each_intel_encoder_mask(dev, encoder,
11677 crtc_state->uapi.encoder_mask) {
11678 if (encoder->initial_fastset_check &&
11679 !encoder->initial_fastset_check(encoder, crtc_state)) {
11680 ret = drm_atomic_add_affected_connectors(state,
11689 ret = drm_atomic_commit(state);
11692 if (ret == -EDEADLK) {
11693 drm_atomic_state_clear(state);
11694 drm_modeset_backoff(&ctx);
11698 drm_atomic_state_put(state);
11700 drm_modeset_drop_locks(&ctx);
11701 drm_modeset_acquire_fini(&ctx);
11706 static void intel_mode_config_init(struct drm_i915_private *i915)
11708 struct drm_mode_config *mode_config = &i915->drm.mode_config;
11710 drm_mode_config_init(&i915->drm);
11711 INIT_LIST_HEAD(&i915->global_obj_list);
11713 mode_config->min_width = 0;
11714 mode_config->min_height = 0;
11716 mode_config->preferred_depth = 24;
11717 mode_config->prefer_shadow = 1;
11719 mode_config->allow_fb_modifiers = true;
11721 mode_config->funcs = &intel_mode_funcs;
11723 mode_config->async_page_flip = has_async_flips(i915);
11726 * Maximum framebuffer dimensions, chosen to match
11727 * the maximum render engine surface size on gen4+.
11729 if (DISPLAY_VER(i915) >= 7) {
11730 mode_config->max_width = 16384;
11731 mode_config->max_height = 16384;
11732 } else if (DISPLAY_VER(i915) >= 4) {
11733 mode_config->max_width = 8192;
11734 mode_config->max_height = 8192;
11735 } else if (DISPLAY_VER(i915) == 3) {
11736 mode_config->max_width = 4096;
11737 mode_config->max_height = 4096;
11739 mode_config->max_width = 2048;
11740 mode_config->max_height = 2048;
11743 if (IS_I845G(i915) || IS_I865G(i915)) {
11744 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
11745 mode_config->cursor_height = 1023;
11746 } else if (IS_I830(i915) || IS_I85X(i915) ||
11747 IS_I915G(i915) || IS_I915GM(i915)) {
11748 mode_config->cursor_width = 64;
11749 mode_config->cursor_height = 64;
11751 mode_config->cursor_width = 256;
11752 mode_config->cursor_height = 256;
11756 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
11758 intel_atomic_global_obj_cleanup(i915);
11759 drm_mode_config_cleanup(&i915->drm);
11762 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
11764 if (plane_config->fb) {
11765 struct drm_framebuffer *fb = &plane_config->fb->base;
11767 /* We may only have the stub and not a full framebuffer */
11768 if (drm_framebuffer_read_refcount(fb))
11769 drm_framebuffer_put(fb);
11774 if (plane_config->vma)
11775 i915_vma_put(plane_config->vma);
11778 /* part #1: call before irq install */
11779 int intel_modeset_init_noirq(struct drm_i915_private *i915)
11783 if (i915_inject_probe_failure(i915))
11786 if (HAS_DISPLAY(i915)) {
11787 ret = drm_vblank_init(&i915->drm,
11788 INTEL_NUM_PIPES(i915));
11793 intel_bios_init(i915);
11795 ret = intel_vga_register(i915);
11799 /* FIXME: completely on the wrong abstraction layer */
11800 intel_power_domains_init_hw(i915, false);
11802 intel_csr_ucode_init(i915);
11804 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
11805 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
11806 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
11808 i915->framestart_delay = 1; /* 1-4 */
11810 intel_mode_config_init(i915);
11812 ret = intel_cdclk_init(i915);
11814 goto cleanup_vga_client_pw_domain_csr;
11816 ret = intel_dbuf_init(i915);
11818 goto cleanup_vga_client_pw_domain_csr;
11820 ret = intel_bw_init(i915);
11822 goto cleanup_vga_client_pw_domain_csr;
11824 init_llist_head(&i915->atomic_helper.free_list);
11825 INIT_WORK(&i915->atomic_helper.free_work,
11826 intel_atomic_helper_free_state_worker);
11828 intel_init_quirks(i915);
11830 intel_fbc_init(i915);
11834 cleanup_vga_client_pw_domain_csr:
11835 intel_csr_ucode_fini(i915);
11836 intel_power_domains_driver_remove(i915);
11837 intel_vga_unregister(i915);
11839 intel_bios_driver_remove(i915);
11844 /* part #2: call after irq install, but before gem init */
11845 int intel_modeset_init_nogem(struct drm_i915_private *i915)
11847 struct drm_device *dev = &i915->drm;
11849 struct intel_crtc *crtc;
11852 intel_init_pm(i915);
11854 intel_panel_sanitize_ssc(i915);
11856 intel_pps_setup(i915);
11858 intel_gmbus_setup(i915);
11860 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
11861 INTEL_NUM_PIPES(i915),
11862 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
11864 if (HAS_DISPLAY(i915)) {
11865 for_each_pipe(i915, pipe) {
11866 ret = intel_crtc_init(i915, pipe);
11868 intel_mode_config_cleanup(i915);
11874 intel_plane_possible_crtcs_init(i915);
11875 intel_shared_dpll_init(dev);
11876 intel_update_fdi_pll_freq(i915);
11878 intel_update_czclk(i915);
11879 intel_modeset_init_hw(i915);
11880 intel_dpll_update_ref_clks(i915);
11882 intel_hdcp_component_init(i915);
11884 if (i915->max_cdclk_freq == 0)
11885 intel_update_max_cdclk(i915);
11888 * If the platform has HTI, we need to find out whether it has reserved
11889 * any display resources before we create our display outputs.
11891 if (INTEL_INFO(i915)->display.has_hti)
11892 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
11894 /* Just disable it once at startup */
11895 intel_vga_disable(i915);
11896 intel_setup_outputs(i915);
11898 drm_modeset_lock_all(dev);
11899 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
11900 drm_modeset_unlock_all(dev);
11902 for_each_intel_crtc(dev, crtc) {
11903 struct intel_initial_plane_config plane_config = {};
11905 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
11909 * Note that reserving the BIOS fb up front prevents us
11910 * from stuffing other stolen allocations like the ring
11911 * on top. This prevents some ugliness at boot time, and
11912 * can even allow for smooth boot transitions if the BIOS
11913 * fb is large enough for the active pipe configuration.
11915 i915->display.get_initial_plane_config(crtc, &plane_config);
11918 * If the fb is shared between multiple heads, we'll
11919 * just get the first one.
11921 intel_find_initial_plane_obj(crtc, &plane_config);
11923 plane_config_fini(&plane_config);
11927 * Make sure hardware watermarks really match the state we read out.
11928 * Note that we need to do this after reconstructing the BIOS fb's
11929 * since the watermark calculation done here will use pstate->fb.
11931 if (!HAS_GMCH(i915))
11932 sanitize_watermarks(i915);
11937 /* part #3: call after gem init */
11938 int intel_modeset_init(struct drm_i915_private *i915)
11942 if (!HAS_DISPLAY(i915))
11946 * Force all active planes to recompute their states. So that on
11947 * mode_setcrtc after probe, all the intel_plane_state variables
11948 * are already calculated and there is no assert_plane warnings
11951 ret = intel_initial_commit(&i915->drm);
11953 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
11955 intel_overlay_setup(i915);
11957 ret = intel_fbdev_init(&i915->drm);
11961 /* Only enable hotplug handling once the fbdev is fully set up. */
11962 intel_hpd_init(i915);
11963 intel_hpd_poll_disable(i915);
11965 intel_init_ipc(i915);
11970 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
11972 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11973 /* 640x480@60Hz, ~25175 kHz */
11974 struct dpll clock = {
11984 drm_WARN_ON(&dev_priv->drm,
11985 i9xx_calc_dpll_params(48000, &clock) != 25154);
11987 drm_dbg_kms(&dev_priv->drm,
11988 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
11989 pipe_name(pipe), clock.vco, clock.dot);
11991 fp = i9xx_dpll_compute_fp(&clock);
11992 dpll = DPLL_DVO_2X_MODE |
11993 DPLL_VGA_MODE_DIS |
11994 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
11995 PLL_P2_DIVIDE_BY_4 |
11996 PLL_REF_INPUT_DREFCLK |
11999 intel_de_write(dev_priv, FP0(pipe), fp);
12000 intel_de_write(dev_priv, FP1(pipe), fp);
12002 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
12003 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
12004 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
12005 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
12006 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
12007 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
12008 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
12011 * Apparently we need to have VGA mode enabled prior to changing
12012 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12013 * dividers, even though the register value does change.
12015 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12016 intel_de_write(dev_priv, DPLL(pipe), dpll);
12018 /* Wait for the clocks to stabilize. */
12019 intel_de_posting_read(dev_priv, DPLL(pipe));
12022 /* The pixel multiplier can only be updated once the
12023 * DPLL is enabled and the clocks are stable.
12025 * So write it again.
12027 intel_de_write(dev_priv, DPLL(pipe), dpll);
12029 /* We do this three times for luck */
12030 for (i = 0; i < 3 ; i++) {
12031 intel_de_write(dev_priv, DPLL(pipe), dpll);
12032 intel_de_posting_read(dev_priv, DPLL(pipe));
12033 udelay(150); /* wait for warmup */
12036 intel_de_write(dev_priv, PIPECONF(pipe),
12037 PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12038 intel_de_posting_read(dev_priv, PIPECONF(pipe));
12040 intel_wait_for_pipe_scanline_moving(crtc);
12043 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12045 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12047 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
12050 drm_WARN_ON(&dev_priv->drm,
12051 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12052 DISPLAY_PLANE_ENABLE);
12053 drm_WARN_ON(&dev_priv->drm,
12054 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12055 DISPLAY_PLANE_ENABLE);
12056 drm_WARN_ON(&dev_priv->drm,
12057 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12058 DISPLAY_PLANE_ENABLE);
12059 drm_WARN_ON(&dev_priv->drm,
12060 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12061 drm_WARN_ON(&dev_priv->drm,
12062 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12064 intel_de_write(dev_priv, PIPECONF(pipe), 0);
12065 intel_de_posting_read(dev_priv, PIPECONF(pipe));
12067 intel_wait_for_pipe_scanline_stopped(crtc);
12069 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12070 intel_de_posting_read(dev_priv, DPLL(pipe));
12074 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12076 struct intel_crtc *crtc;
12078 if (DISPLAY_VER(dev_priv) >= 4)
12081 for_each_intel_crtc(&dev_priv->drm, crtc) {
12082 struct intel_plane *plane =
12083 to_intel_plane(crtc->base.primary);
12084 struct intel_crtc *plane_crtc;
12087 if (!plane->get_hw_state(plane, &pipe))
12090 if (pipe == crtc->pipe)
12093 drm_dbg_kms(&dev_priv->drm,
12094 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12095 plane->base.base.id, plane->base.name);
12097 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12098 intel_plane_disable_noatomic(plane_crtc, plane);
12102 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12104 struct drm_device *dev = crtc->base.dev;
12105 struct intel_encoder *encoder;
12107 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12113 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12115 struct drm_device *dev = encoder->base.dev;
12116 struct intel_connector *connector;
12118 for_each_connector_on_encoder(dev, &encoder->base, connector)
12124 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12125 enum pipe pch_transcoder)
12127 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12128 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12131 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12133 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12134 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12135 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12137 if (DISPLAY_VER(dev_priv) >= 9 ||
12138 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12139 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12142 if (transcoder_is_dsi(cpu_transcoder))
12145 val = intel_de_read(dev_priv, reg);
12146 val &= ~HSW_FRAME_START_DELAY_MASK;
12147 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12148 intel_de_write(dev_priv, reg, val);
12150 i915_reg_t reg = PIPECONF(cpu_transcoder);
12153 val = intel_de_read(dev_priv, reg);
12154 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12155 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12156 intel_de_write(dev_priv, reg, val);
12159 if (!crtc_state->has_pch_encoder)
12162 if (HAS_PCH_IBX(dev_priv)) {
12163 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12166 val = intel_de_read(dev_priv, reg);
12167 val &= ~TRANS_FRAME_START_DELAY_MASK;
12168 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12169 intel_de_write(dev_priv, reg, val);
12171 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12172 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12175 val = intel_de_read(dev_priv, reg);
12176 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12177 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12178 intel_de_write(dev_priv, reg, val);
12182 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12183 struct drm_modeset_acquire_ctx *ctx)
12185 struct drm_device *dev = crtc->base.dev;
12186 struct drm_i915_private *dev_priv = to_i915(dev);
12187 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12189 if (crtc_state->hw.active) {
12190 struct intel_plane *plane;
12192 /* Clear any frame start delays used for debugging left by the BIOS */
12193 intel_sanitize_frame_start_delay(crtc_state);
12195 /* Disable everything but the primary plane */
12196 for_each_intel_plane_on_crtc(dev, crtc, plane) {
12197 const struct intel_plane_state *plane_state =
12198 to_intel_plane_state(plane->base.state);
12200 if (plane_state->uapi.visible &&
12201 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
12202 intel_plane_disable_noatomic(crtc, plane);
12206 * Disable any background color set by the BIOS, but enable the
12207 * gamma and CSC to match how we program our planes.
12209 if (DISPLAY_VER(dev_priv) >= 9)
12210 intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
12211 SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
12214 /* Adjust the state of the output pipe according to whether we
12215 * have active connectors/encoders. */
12216 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
12217 !crtc_state->bigjoiner_slave)
12218 intel_crtc_disable_noatomic(crtc, ctx);
12220 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
12222 * We start out with underrun reporting disabled to avoid races.
12223 * For correct bookkeeping mark this on active crtcs.
12225 * Also on gmch platforms we dont have any hardware bits to
12226 * disable the underrun reporting. Which means we need to start
12227 * out with underrun reporting disabled also on inactive pipes,
12228 * since otherwise we'll complain about the garbage we read when
12229 * e.g. coming up after runtime pm.
12231 * No protection against concurrent access is required - at
12232 * worst a fifo underrun happens which also sets this to false.
12234 crtc->cpu_fifo_underrun_disabled = true;
12236 * We track the PCH trancoder underrun reporting state
12237 * within the crtc. With crtc for pipe A housing the underrun
12238 * reporting state for PCH transcoder A, crtc for pipe B housing
12239 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
12240 * and marking underrun reporting as disabled for the non-existing
12241 * PCH transcoders B and C would prevent enabling the south
12242 * error interrupt (see cpt_can_enable_serr_int()).
12244 if (has_pch_trancoder(dev_priv, crtc->pipe))
12245 crtc->pch_fifo_underrun_disabled = true;
12249 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12251 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12254 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
12255 * the hardware when a high res displays plugged in. DPLL P
12256 * divider is zero, and the pipe timings are bonkers. We'll
12257 * try to disable everything in that case.
12259 * FIXME would be nice to be able to sanitize this state
12260 * without several WARNs, but for now let's take the easy
12263 return IS_SANDYBRIDGE(dev_priv) &&
12264 crtc_state->hw.active &&
12265 crtc_state->shared_dpll &&
12266 crtc_state->port_clock == 0;
12269 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12271 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12272 struct intel_connector *connector;
12273 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
12274 struct intel_crtc_state *crtc_state = crtc ?
12275 to_intel_crtc_state(crtc->base.state) : NULL;
12277 /* We need to check both for a crtc link (meaning that the
12278 * encoder is active and trying to read from a pipe) and the
12279 * pipe itself being active. */
12280 bool has_active_crtc = crtc_state &&
12281 crtc_state->hw.active;
12283 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
12284 drm_dbg_kms(&dev_priv->drm,
12285 "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
12286 pipe_name(crtc->pipe));
12287 has_active_crtc = false;
12290 connector = intel_encoder_find_connector(encoder);
12291 if (connector && !has_active_crtc) {
12292 drm_dbg_kms(&dev_priv->drm,
12293 "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12294 encoder->base.base.id,
12295 encoder->base.name);
12297 /* Connector is active, but has no active pipe. This is
12298 * fallout from our resume register restoring. Disable
12299 * the encoder manually again. */
12301 struct drm_encoder *best_encoder;
12303 drm_dbg_kms(&dev_priv->drm,
12304 "[ENCODER:%d:%s] manually disabled\n",
12305 encoder->base.base.id,
12306 encoder->base.name);
12308 /* avoid oopsing in case the hooks consult best_encoder */
12309 best_encoder = connector->base.state->best_encoder;
12310 connector->base.state->best_encoder = &encoder->base;
12312 /* FIXME NULL atomic state passed! */
12313 if (encoder->disable)
12314 encoder->disable(NULL, encoder, crtc_state,
12315 connector->base.state);
12316 if (encoder->post_disable)
12317 encoder->post_disable(NULL, encoder, crtc_state,
12318 connector->base.state);
12320 connector->base.state->best_encoder = best_encoder;
12322 encoder->base.crtc = NULL;
12324 /* Inconsistent output/port/pipe state happens presumably due to
12325 * a bug in one of the get_hw_state functions. Or someplace else
12326 * in our code, like the register restore mess on resume. Clamp
12327 * things to off as a safer default. */
12329 connector->base.dpms = DRM_MODE_DPMS_OFF;
12330 connector->base.encoder = NULL;
12333 /* notify opregion of the sanitized encoder state */
12334 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
12336 if (HAS_DDI(dev_priv))
12337 intel_ddi_sanitize_encoder_pll_mapping(encoder);
12340 /* FIXME read out full plane state for all planes */
12341 static void readout_plane_state(struct drm_i915_private *dev_priv)
12343 struct intel_plane *plane;
12344 struct intel_crtc *crtc;
12346 for_each_intel_plane(&dev_priv->drm, plane) {
12347 struct intel_plane_state *plane_state =
12348 to_intel_plane_state(plane->base.state);
12349 struct intel_crtc_state *crtc_state;
12350 enum pipe pipe = PIPE_A;
12353 visible = plane->get_hw_state(plane, &pipe);
12355 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12356 crtc_state = to_intel_crtc_state(crtc->base.state);
12358 intel_set_plane_visible(crtc_state, plane_state, visible);
12360 drm_dbg_kms(&dev_priv->drm,
12361 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
12362 plane->base.base.id, plane->base.name,
12363 enableddisabled(visible), pipe_name(pipe));
12366 for_each_intel_crtc(&dev_priv->drm, crtc) {
12367 struct intel_crtc_state *crtc_state =
12368 to_intel_crtc_state(crtc->base.state);
12370 fixup_plane_bitmasks(crtc_state);
12374 static void intel_modeset_readout_hw_state(struct drm_device *dev)
12376 struct drm_i915_private *dev_priv = to_i915(dev);
12377 struct intel_cdclk_state *cdclk_state =
12378 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
12379 struct intel_dbuf_state *dbuf_state =
12380 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
12382 struct intel_crtc *crtc;
12383 struct intel_encoder *encoder;
12384 struct intel_connector *connector;
12385 struct drm_connector_list_iter conn_iter;
12386 u8 active_pipes = 0;
12388 for_each_intel_crtc(dev, crtc) {
12389 struct intel_crtc_state *crtc_state =
12390 to_intel_crtc_state(crtc->base.state);
12392 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
12393 intel_crtc_free_hw_state(crtc_state);
12394 intel_crtc_state_reset(crtc_state, crtc);
12396 intel_crtc_get_pipe_config(crtc_state);
12398 crtc_state->hw.enable = crtc_state->hw.active;
12400 crtc->base.enabled = crtc_state->hw.enable;
12401 crtc->active = crtc_state->hw.active;
12403 if (crtc_state->hw.active)
12404 active_pipes |= BIT(crtc->pipe);
12406 drm_dbg_kms(&dev_priv->drm,
12407 "[CRTC:%d:%s] hw state readout: %s\n",
12408 crtc->base.base.id, crtc->base.name,
12409 enableddisabled(crtc_state->hw.active));
12412 dev_priv->active_pipes = cdclk_state->active_pipes =
12413 dbuf_state->active_pipes = active_pipes;
12415 readout_plane_state(dev_priv);
12417 for_each_intel_encoder(dev, encoder) {
12420 if (encoder->get_hw_state(encoder, &pipe)) {
12421 struct intel_crtc_state *crtc_state;
12423 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12424 crtc_state = to_intel_crtc_state(crtc->base.state);
12426 encoder->base.crtc = &crtc->base;
12427 intel_encoder_get_config(encoder, crtc_state);
12428 if (encoder->sync_state)
12429 encoder->sync_state(encoder, crtc_state);
12431 /* read out to slave crtc as well for bigjoiner */
12432 if (crtc_state->bigjoiner) {
12433 /* encoder should read be linked to bigjoiner master */
12434 WARN_ON(crtc_state->bigjoiner_slave);
12436 crtc = crtc_state->bigjoiner_linked_crtc;
12437 crtc_state = to_intel_crtc_state(crtc->base.state);
12438 intel_encoder_get_config(encoder, crtc_state);
12441 encoder->base.crtc = NULL;
12444 drm_dbg_kms(&dev_priv->drm,
12445 "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12446 encoder->base.base.id, encoder->base.name,
12447 enableddisabled(encoder->base.crtc),
12451 intel_dpll_readout_hw_state(dev_priv);
12453 drm_connector_list_iter_begin(dev, &conn_iter);
12454 for_each_intel_connector_iter(connector, &conn_iter) {
12455 if (connector->get_hw_state(connector)) {
12456 struct intel_crtc_state *crtc_state;
12457 struct intel_crtc *crtc;
12459 connector->base.dpms = DRM_MODE_DPMS_ON;
12461 encoder = intel_attached_encoder(connector);
12462 connector->base.encoder = &encoder->base;
12464 crtc = to_intel_crtc(encoder->base.crtc);
12465 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
12467 if (crtc_state && crtc_state->hw.active) {
12469 * This has to be done during hardware readout
12470 * because anything calling .crtc_disable may
12471 * rely on the connector_mask being accurate.
12473 crtc_state->uapi.connector_mask |=
12474 drm_connector_mask(&connector->base);
12475 crtc_state->uapi.encoder_mask |=
12476 drm_encoder_mask(&encoder->base);
12479 connector->base.dpms = DRM_MODE_DPMS_OFF;
12480 connector->base.encoder = NULL;
12482 drm_dbg_kms(&dev_priv->drm,
12483 "[CONNECTOR:%d:%s] hw state readout: %s\n",
12484 connector->base.base.id, connector->base.name,
12485 enableddisabled(connector->base.encoder));
12487 drm_connector_list_iter_end(&conn_iter);
12489 for_each_intel_crtc(dev, crtc) {
12490 struct intel_bw_state *bw_state =
12491 to_intel_bw_state(dev_priv->bw_obj.state);
12492 struct intel_crtc_state *crtc_state =
12493 to_intel_crtc_state(crtc->base.state);
12494 struct intel_plane *plane;
12497 if (crtc_state->bigjoiner_slave)
12500 if (crtc_state->hw.active) {
12502 * The initial mode needs to be set in order to keep
12503 * the atomic core happy. It wants a valid mode if the
12504 * crtc's enabled, so we do the above call.
12506 * But we don't set all the derived state fully, hence
12507 * set a flag to indicate that a full recalculation is
12508 * needed on the next commit.
12510 crtc_state->inherited = true;
12512 intel_crtc_update_active_timings(crtc_state);
12514 intel_crtc_copy_hw_to_uapi_state(crtc_state);
12517 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
12518 const struct intel_plane_state *plane_state =
12519 to_intel_plane_state(plane->base.state);
12522 * FIXME don't have the fb yet, so can't
12523 * use intel_plane_data_rate() :(
12525 if (plane_state->uapi.visible)
12526 crtc_state->data_rate[plane->id] =
12527 4 * crtc_state->pixel_rate;
12529 * FIXME don't have the fb yet, so can't
12530 * use plane->min_cdclk() :(
12532 if (plane_state->uapi.visible && plane->min_cdclk) {
12533 if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
12534 crtc_state->min_cdclk[plane->id] =
12535 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
12537 crtc_state->min_cdclk[plane->id] =
12538 crtc_state->pixel_rate;
12540 drm_dbg_kms(&dev_priv->drm,
12541 "[PLANE:%d:%s] min_cdclk %d kHz\n",
12542 plane->base.base.id, plane->base.name,
12543 crtc_state->min_cdclk[plane->id]);
12546 if (crtc_state->hw.active) {
12547 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
12548 if (drm_WARN_ON(dev, min_cdclk < 0))
12552 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
12553 cdclk_state->min_voltage_level[crtc->pipe] =
12554 crtc_state->min_voltage_level;
12556 intel_bw_crtc_update(bw_state, crtc_state);
12558 intel_pipe_config_sanity_check(dev_priv, crtc_state);
12560 /* discard our incomplete slave state, copy it from master */
12561 if (crtc_state->bigjoiner && crtc_state->hw.active) {
12562 struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
12563 struct intel_crtc_state *slave_crtc_state =
12564 to_intel_crtc_state(slave->base.state);
12566 copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
12567 slave->base.mode = crtc->base.mode;
12569 cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
12570 cdclk_state->min_voltage_level[slave->pipe] =
12571 crtc_state->min_voltage_level;
12573 for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
12574 const struct intel_plane_state *plane_state =
12575 to_intel_plane_state(plane->base.state);
12578 * FIXME don't have the fb yet, so can't
12579 * use intel_plane_data_rate() :(
12581 if (plane_state->uapi.visible)
12582 crtc_state->data_rate[plane->id] =
12583 4 * crtc_state->pixel_rate;
12585 crtc_state->data_rate[plane->id] = 0;
12588 intel_bw_crtc_update(bw_state, slave_crtc_state);
12589 drm_calc_timestamping_constants(&slave->base,
12590 &slave_crtc_state->hw.adjusted_mode);
12596 get_encoder_power_domains(struct drm_i915_private *dev_priv)
12598 struct intel_encoder *encoder;
12600 for_each_intel_encoder(&dev_priv->drm, encoder) {
12601 struct intel_crtc_state *crtc_state;
12603 if (!encoder->get_power_domains)
12607 * MST-primary and inactive encoders don't have a crtc state
12608 * and neither of these require any power domain references.
12610 if (!encoder->base.crtc)
12613 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
12614 encoder->get_power_domains(encoder, crtc_state);
12618 static void intel_early_display_was(struct drm_i915_private *dev_priv)
12621 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
12622 * Also known as Wa_14010480278.
12624 if (IS_DISPLAY_VER(dev_priv, 10, 12))
12625 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
12626 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
12628 if (IS_HASWELL(dev_priv)) {
12630 * WaRsPkgCStateDisplayPMReq:hsw
12631 * System hang if this isn't done before disabling all planes!
12633 intel_de_write(dev_priv, CHICKEN_PAR1_1,
12634 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
12637 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
12638 /* Display WA #1142:kbl,cfl,cml */
12639 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
12640 KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
12641 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
12642 KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
12643 KBL_ARB_FILL_SPARE_14);
12647 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
12648 enum port port, i915_reg_t hdmi_reg)
12650 u32 val = intel_de_read(dev_priv, hdmi_reg);
12652 if (val & SDVO_ENABLE ||
12653 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
12656 drm_dbg_kms(&dev_priv->drm,
12657 "Sanitizing transcoder select for HDMI %c\n",
12660 val &= ~SDVO_PIPE_SEL_MASK;
12661 val |= SDVO_PIPE_SEL(PIPE_A);
12663 intel_de_write(dev_priv, hdmi_reg, val);
12666 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
12667 enum port port, i915_reg_t dp_reg)
12669 u32 val = intel_de_read(dev_priv, dp_reg);
12671 if (val & DP_PORT_EN ||
12672 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
12675 drm_dbg_kms(&dev_priv->drm,
12676 "Sanitizing transcoder select for DP %c\n",
12679 val &= ~DP_PIPE_SEL_MASK;
12680 val |= DP_PIPE_SEL(PIPE_A);
12682 intel_de_write(dev_priv, dp_reg, val);
12685 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
12688 * The BIOS may select transcoder B on some of the PCH
12689 * ports even it doesn't enable the port. This would trip
12690 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
12691 * Sanitize the transcoder select bits to prevent that. We
12692 * assume that the BIOS never actually enabled the port,
12693 * because if it did we'd actually have to toggle the port
12694 * on and back off to make the transcoder A select stick
12695 * (see. intel_dp_link_down(), intel_disable_hdmi(),
12696 * intel_disable_sdvo()).
12698 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
12699 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
12700 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
12702 /* PCH SDVOB multiplex with HDMIB */
12703 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
12704 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
12705 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
12708 /* Scan out the current hw modeset state,
12709 * and sanitizes it to the current state
12712 intel_modeset_setup_hw_state(struct drm_device *dev,
12713 struct drm_modeset_acquire_ctx *ctx)
12715 struct drm_i915_private *dev_priv = to_i915(dev);
12716 struct intel_encoder *encoder;
12717 struct intel_crtc *crtc;
12718 intel_wakeref_t wakeref;
12720 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
12722 intel_early_display_was(dev_priv);
12723 intel_modeset_readout_hw_state(dev);
12725 /* HW state is read out, now we need to sanitize this mess. */
12727 /* Sanitize the TypeC port mode upfront, encoders depend on this */
12728 for_each_intel_encoder(dev, encoder) {
12729 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
12731 /* We need to sanitize only the MST primary port. */
12732 if (encoder->type != INTEL_OUTPUT_DP_MST &&
12733 intel_phy_is_tc(dev_priv, phy))
12734 intel_tc_port_sanitize(enc_to_dig_port(encoder));
12737 get_encoder_power_domains(dev_priv);
12739 if (HAS_PCH_IBX(dev_priv))
12740 ibx_sanitize_pch_ports(dev_priv);
12743 * intel_sanitize_plane_mapping() may need to do vblank
12744 * waits, so we need vblank interrupts restored beforehand.
12746 for_each_intel_crtc(&dev_priv->drm, crtc) {
12747 struct intel_crtc_state *crtc_state =
12748 to_intel_crtc_state(crtc->base.state);
12750 drm_crtc_vblank_reset(&crtc->base);
12752 if (crtc_state->hw.active)
12753 intel_crtc_vblank_on(crtc_state);
12756 intel_sanitize_plane_mapping(dev_priv);
12758 for_each_intel_encoder(dev, encoder)
12759 intel_sanitize_encoder(encoder);
12761 for_each_intel_crtc(&dev_priv->drm, crtc) {
12762 struct intel_crtc_state *crtc_state =
12763 to_intel_crtc_state(crtc->base.state);
12765 intel_sanitize_crtc(crtc, ctx);
12766 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
12769 intel_modeset_update_connector_atomic_state(dev);
12771 intel_dpll_sanitize_state(dev_priv);
12773 if (IS_G4X(dev_priv)) {
12774 g4x_wm_get_hw_state(dev_priv);
12775 g4x_wm_sanitize(dev_priv);
12776 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
12777 vlv_wm_get_hw_state(dev_priv);
12778 vlv_wm_sanitize(dev_priv);
12779 } else if (DISPLAY_VER(dev_priv) >= 9) {
12780 skl_wm_get_hw_state(dev_priv);
12781 } else if (HAS_PCH_SPLIT(dev_priv)) {
12782 ilk_wm_get_hw_state(dev_priv);
12785 for_each_intel_crtc(dev, crtc) {
12786 struct intel_crtc_state *crtc_state =
12787 to_intel_crtc_state(crtc->base.state);
12790 put_domains = modeset_get_crtc_power_domains(crtc_state);
12791 if (drm_WARN_ON(dev, put_domains))
12792 modeset_put_crtc_power_domains(crtc, put_domains);
12795 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
12798 void intel_display_resume(struct drm_device *dev)
12800 struct drm_i915_private *dev_priv = to_i915(dev);
12801 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
12802 struct drm_modeset_acquire_ctx ctx;
12805 dev_priv->modeset_restore_state = NULL;
12807 state->acquire_ctx = &ctx;
12809 drm_modeset_acquire_init(&ctx, 0);
12812 ret = drm_modeset_lock_all_ctx(dev, &ctx);
12813 if (ret != -EDEADLK)
12816 drm_modeset_backoff(&ctx);
12820 ret = __intel_display_resume(dev, state, &ctx);
12822 intel_enable_ipc(dev_priv);
12823 drm_modeset_drop_locks(&ctx);
12824 drm_modeset_acquire_fini(&ctx);
12827 drm_err(&dev_priv->drm,
12828 "Restoring old state failed with %i\n", ret);
12830 drm_atomic_state_put(state);
12833 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
12835 struct intel_connector *connector;
12836 struct drm_connector_list_iter conn_iter;
12838 /* Kill all the work that may have been queued by hpd. */
12839 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
12840 for_each_intel_connector_iter(connector, &conn_iter) {
12841 if (connector->modeset_retry_work.func)
12842 cancel_work_sync(&connector->modeset_retry_work);
12843 if (connector->hdcp.shim) {
12844 cancel_delayed_work_sync(&connector->hdcp.check_work);
12845 cancel_work_sync(&connector->hdcp.prop_work);
12848 drm_connector_list_iter_end(&conn_iter);
12851 /* part #1: call before irq uninstall */
12852 void intel_modeset_driver_remove(struct drm_i915_private *i915)
12854 flush_workqueue(i915->flip_wq);
12855 flush_workqueue(i915->modeset_wq);
12857 flush_work(&i915->atomic_helper.free_work);
12858 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
12861 /* part #2: call after irq uninstall */
12862 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
12865 * Due to the hpd irq storm handling the hotplug work can re-arm the
12866 * poll handlers. Hence disable polling after hpd handling is shut down.
12868 intel_hpd_poll_fini(i915);
12871 * MST topology needs to be suspended so we don't have any calls to
12872 * fbdev after it's finalized. MST will be destroyed later as part of
12873 * drm_mode_config_cleanup()
12875 intel_dp_mst_suspend(i915);
12877 /* poll work can call into fbdev, hence clean that up afterwards */
12878 intel_fbdev_fini(i915);
12880 intel_unregister_dsm_handler();
12882 intel_fbc_global_disable(i915);
12884 /* flush any delayed tasks or pending work */
12885 flush_scheduled_work();
12887 intel_hdcp_component_fini(i915);
12889 intel_mode_config_cleanup(i915);
12891 intel_overlay_cleanup(i915);
12893 intel_gmbus_teardown(i915);
12895 destroy_workqueue(i915->flip_wq);
12896 destroy_workqueue(i915->modeset_wq);
12898 intel_fbc_cleanup_cfb(i915);
12901 /* part #3: call after gem init */
12902 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
12904 intel_csr_ucode_fini(i915);
12906 intel_power_domains_driver_remove(i915);
12908 intel_vga_unregister(i915);
12910 intel_bios_driver_remove(i915);
12913 void intel_display_driver_register(struct drm_i915_private *i915)
12915 if (!HAS_DISPLAY(i915))
12918 intel_display_debugfs_register(i915);
12920 /* Must be done after probing outputs */
12921 intel_opregion_register(i915);
12922 acpi_video_register();
12924 intel_audio_init(i915);
12927 * Some ports require correctly set-up hpd registers for
12928 * detection to work properly (leading to ghost connected
12929 * connector status), e.g. VGA on gm45. Hence we can only set
12930 * up the initial fbdev config after hpd irqs are fully
12931 * enabled. We do it last so that the async config cannot run
12932 * before the connectors are registered.
12934 intel_fbdev_initial_config_async(&i915->drm);
12937 * We need to coordinate the hotplugs with the asynchronous
12938 * fbdev configuration, for which we use the
12939 * fbdev->async_cookie.
12941 drm_kms_helper_poll_init(&i915->drm);
12944 void intel_display_driver_unregister(struct drm_i915_private *i915)
12946 if (!HAS_DISPLAY(i915))
12949 intel_fbdev_unregister(i915);
12950 intel_audio_deinit(i915);
12953 * After flushing the fbdev (incl. a late async config which
12954 * will have delayed queuing of a hotplug event), then flush
12955 * the hotplug events.
12957 drm_kms_helper_poll_fini(&i915->drm);
12958 drm_atomic_helper_shutdown(&i915->drm);
12960 acpi_video_unregister();
12961 intel_opregion_unregister(i915);
12964 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
12966 struct intel_display_error_state {
12968 u32 power_well_driver;
12970 struct intel_cursor_error_state {
12975 } cursor[I915_MAX_PIPES];
12977 struct intel_pipe_error_state {
12978 bool power_domain_on;
12981 } pipe[I915_MAX_PIPES];
12983 struct intel_plane_error_state {
12991 } plane[I915_MAX_PIPES];
12993 struct intel_transcoder_error_state {
12995 bool power_domain_on;
12996 enum transcoder cpu_transcoder;
13009 struct intel_display_error_state *
13010 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
13012 struct intel_display_error_state *error;
13013 int transcoders[] = {
13022 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
13024 if (!HAS_DISPLAY(dev_priv))
13027 error = kzalloc(sizeof(*error), GFP_ATOMIC);
13031 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
13032 error->power_well_driver = intel_de_read(dev_priv,
13033 HSW_PWR_WELL_CTL2);
13035 for_each_pipe(dev_priv, i) {
13036 error->pipe[i].power_domain_on =
13037 __intel_display_power_is_enabled(dev_priv,
13038 POWER_DOMAIN_PIPE(i));
13039 if (!error->pipe[i].power_domain_on)
13042 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
13043 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
13044 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
13046 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
13047 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
13048 if (DISPLAY_VER(dev_priv) <= 3) {
13049 error->plane[i].size = intel_de_read(dev_priv,
13051 error->plane[i].pos = intel_de_read(dev_priv,
13054 if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
13055 error->plane[i].addr = intel_de_read(dev_priv,
13057 if (DISPLAY_VER(dev_priv) >= 4) {
13058 error->plane[i].surface = intel_de_read(dev_priv,
13060 error->plane[i].tile_offset = intel_de_read(dev_priv,
13064 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
13066 if (HAS_GMCH(dev_priv))
13067 error->pipe[i].stat = intel_de_read(dev_priv,
13071 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
13072 enum transcoder cpu_transcoder = transcoders[i];
13074 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
13077 error->transcoder[i].available = true;
13078 error->transcoder[i].power_domain_on =
13079 __intel_display_power_is_enabled(dev_priv,
13080 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13081 if (!error->transcoder[i].power_domain_on)
13084 error->transcoder[i].cpu_transcoder = cpu_transcoder;
13086 error->transcoder[i].conf = intel_de_read(dev_priv,
13087 PIPECONF(cpu_transcoder));
13088 error->transcoder[i].htotal = intel_de_read(dev_priv,
13089 HTOTAL(cpu_transcoder));
13090 error->transcoder[i].hblank = intel_de_read(dev_priv,
13091 HBLANK(cpu_transcoder));
13092 error->transcoder[i].hsync = intel_de_read(dev_priv,
13093 HSYNC(cpu_transcoder));
13094 error->transcoder[i].vtotal = intel_de_read(dev_priv,
13095 VTOTAL(cpu_transcoder));
13096 error->transcoder[i].vblank = intel_de_read(dev_priv,
13097 VBLANK(cpu_transcoder));
13098 error->transcoder[i].vsync = intel_de_read(dev_priv,
13099 VSYNC(cpu_transcoder));
13105 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13108 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13109 struct intel_display_error_state *error)
13111 struct drm_i915_private *dev_priv = m->i915;
13117 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
13118 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
13119 err_printf(m, "PWR_WELL_CTL2: %08x\n",
13120 error->power_well_driver);
13121 for_each_pipe(dev_priv, i) {
13122 err_printf(m, "Pipe [%d]:\n", i);
13123 err_printf(m, " Power: %s\n",
13124 onoff(error->pipe[i].power_domain_on));
13125 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
13126 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
13128 err_printf(m, "Plane [%d]:\n", i);
13129 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
13130 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
13131 if (DISPLAY_VER(dev_priv) <= 3) {
13132 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
13133 err_printf(m, " POS: %08x\n", error->plane[i].pos);
13135 if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
13136 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
13137 if (DISPLAY_VER(dev_priv) >= 4) {
13138 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
13139 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
13142 err_printf(m, "Cursor [%d]:\n", i);
13143 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
13144 err_printf(m, " POS: %08x\n", error->cursor[i].position);
13145 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
13148 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
13149 if (!error->transcoder[i].available)
13152 err_printf(m, "CPU transcoder: %s\n",
13153 transcoder_name(error->transcoder[i].cpu_transcoder));
13154 err_printf(m, " Power: %s\n",
13155 onoff(error->transcoder[i].power_domain_on));
13156 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
13157 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
13158 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
13159 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
13160 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
13161 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
13162 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);